Delete open_clip
This view is limited to 50 files because it contains too many changes. See the raw diff for the full change set.
- open_clip/.github/workflows/ci.yml +0 -141
- open_clip/.github/workflows/clear-cache.yml +0 -29
- open_clip/.github/workflows/python-publish.yml +0 -37
- open_clip/.gitignore +0 -153
- open_clip/CITATION.cff +0 -33
- open_clip/HISTORY.md +0 -176
- open_clip/LICENSE +0 -23
- open_clip/MANIFEST.in +0 -3
- open_clip/Makefile +0 -12
- open_clip/README.md +0 -798
- open_clip/docs/CLIP.png +0 -0
- open_clip/docs/Interacting_with_open_clip.ipynb +0 -0
- open_clip/docs/Interacting_with_open_coca.ipynb +0 -118
- open_clip/docs/clip_conceptual_captions.md +0 -13
- open_clip/docs/clip_loss.png +0 -0
- open_clip/docs/clip_recall.png +0 -0
- open_clip/docs/clip_val_loss.png +0 -0
- open_clip/docs/clip_zeroshot.png +0 -0
- open_clip/docs/effective_robustness.png +0 -3
- open_clip/docs/laion2b_clip_zeroshot_b32.png +0 -0
- open_clip/docs/laion_clip_zeroshot.png +0 -0
- open_clip/docs/laion_clip_zeroshot_b16.png +0 -0
- open_clip/docs/laion_clip_zeroshot_b16_plus_240.png +0 -0
- open_clip/docs/laion_clip_zeroshot_l14.png +0 -0
- open_clip/docs/laion_openai_compare_b32.jpg +0 -0
- open_clip/docs/scaling.png +0 -0
- open_clip/docs/script_examples/stability_example.sh +0 -60
- open_clip/pytest.ini +0 -3
- open_clip/requirements-test.txt +0 -4
- open_clip/requirements-training.txt +0 -12
- open_clip/requirements.txt +0 -9
- open_clip/setup.py +0 -61
- open_clip/src/open_clip/__init__.py +0 -15
- open_clip/src/open_clip/bpe_simple_vocab_16e6.txt.gz +0 -3
- open_clip/src/open_clip/coca_model.py +0 -458
- open_clip/src/open_clip/constants.py +0 -2
- open_clip/src/open_clip/factory.py +0 -366
- open_clip/src/open_clip/generation_utils.py +0 -0
- open_clip/src/open_clip/hf_configs.py +0 -56
- open_clip/src/open_clip/hf_model.py +0 -193
- open_clip/src/open_clip/loss.py +0 -212
- open_clip/src/open_clip/model.py +0 -448
- open_clip/src/open_clip/model_configs/RN101-quickgelu.json +0 -22
- open_clip/src/open_clip/model_configs/RN101.json +0 -21
- open_clip/src/open_clip/model_configs/RN50-quickgelu.json +0 -22
- open_clip/src/open_clip/model_configs/RN50.json +0 -21
- open_clip/src/open_clip/model_configs/RN50x16.json +0 -21
- open_clip/src/open_clip/model_configs/RN50x4.json +0 -21
- open_clip/src/open_clip/model_configs/RN50x64.json +0 -21
- open_clip/src/open_clip/model_configs/ViT-B-16-plus-240.json +0 -16
open_clip/.github/workflows/ci.yml
DELETED
@@ -1,141 +0,0 @@
name: Continuous integration

on:
  push:
    branches:
      - main
    paths-ignore:
      - '**.md'
      - 'CITATION.cff'
      - 'LICENSE'
      - '.gitignore'
      - 'docs/**'
  pull_request:
    branches:
      - main
    paths-ignore:
      - '**.md'
      - 'CITATION.cff'
      - 'LICENSE'
      - '.gitignore'
      - 'docs/**'
  workflow_dispatch:
    inputs:
      manual_revision_reference:
        required: false
        type: string
      manual_revision_test:
        required: false
        type: string

env:
  REVISION_REFERENCE: v2.8.2
  # 9d31b2ec4df6d8228f370ff20c8267ec6ba39383 earliest compatible v2.7.0 + pretrained_hf param

jobs:
  Tests:
    strategy:
      matrix:
        os: [ ubuntu-latest ] #, macos-latest ]
        python: [ 3.8 ]
        job_num: [ 4 ]
        job: [ 1, 2, 3, 4 ]
    runs-on: ${{ matrix.os }}
    steps:
      - uses: actions/checkout@v3
        with:
          fetch-depth: 0
          ref: ${{ inputs.manual_revision_test }}
      - name: Set up Python ${{ matrix.python }}
        id: pythonsetup
        uses: actions/setup-python@v4
        with:
          python-version: ${{ matrix.python }}
      - name: Venv cache
        id: venv-cache
        uses: actions/cache@v3
        with:
          path: .env
          key: venv-${{ matrix.os }}-${{ steps.pythonsetup.outputs.python-version }}-${{ hashFiles('requirements*') }}
      - name: Pytest durations cache
        uses: actions/cache@v3
        with:
          path: .test_durations
          key: test_durations-${{ matrix.os }}-${{ steps.pythonsetup.outputs.python-version }}-${{ matrix.job }}-${{ github.run_id }}
          restore-keys: test_durations-0-
      - name: Setup
        if: steps.venv-cache.outputs.cache-hit != 'true'
        run: |
          python3 -m venv .env
          source .env/bin/activate
          make install
          make install-test
          make install-training
      - name: Prepare test data
        run: |
          source .env/bin/activate
          python -m pytest \
            --quiet --co \
            --splitting-algorithm least_duration \
            --splits ${{ matrix.job_num }} \
            --group ${{ matrix.job }} \
            -m regression_test \
            tests \
            | head -n -2 | grep -Po 'test_inference_with_data\[\K[^]]*(?=-False]|-True])' \
            > models_gh_runner.txt
          if [ -n "${{ inputs.manual_revision_reference }}" ]; then
            REVISION_REFERENCE=${{ inputs.manual_revision_reference }}
          fi
          python tests/util_test.py \
            --save_model_list models_gh_runner.txt \
            --model_list models_gh_runner.txt \
            --git_revision $REVISION_REFERENCE
      - name: Unit tests
        run: |
          source .env/bin/activate
          touch .test_durations
          cp .test_durations durations_1
          mv .test_durations durations_2
          python -m pytest \
            -x -s -v \
            --splitting-algorithm least_duration \
            --splits ${{ matrix.job_num }} \
            --group ${{ matrix.job }} \
            --store-durations \
            --durations-path durations_1 \
            --clean-durations \
            -m "not regression_test" \
            tests
          OPEN_CLIP_TEST_REG_MODELS=models_gh_runner.txt python -m pytest \
            -x -s -v \
            --store-durations \
            --durations-path durations_2 \
            --clean-durations \
            -m "regression_test" \
            tests
          jq -s -S 'add' durations_* > .test_durations
      - name: Collect pytest durations
        uses: actions/upload-artifact@v3
        with:
          name: pytest_durations_${{ matrix.os }}-${{ matrix.python }}-${{ matrix.job }}
          path: .test_durations

  Collect:
    needs: Tests
    runs-on: ubuntu-latest
    steps:
      - name: Cache
        uses: actions/cache@v3
        with:
          path: .test_durations
          key: test_durations-0-${{ github.run_id }}
      - name: Collect
        uses: actions/download-artifact@v3
        with:
          path: artifacts
      - name: Consolidate
        run: |
          jq -n -S \
            'reduce (inputs | to_entries[]) as {$key, $value} ({}; .[$key] += $value)' \
            artifacts/pytest_durations_*/.test_durations > .test_durations
open_clip/.github/workflows/clear-cache.yml
DELETED
@@ -1,29 +0,0 @@
name: Clear cache

on:
  workflow_dispatch:

permissions:
  actions: write

jobs:
  clear-cache:
    runs-on: ubuntu-latest
    steps:
      - name: Clear cache
        uses: actions/github-script@v6
        with:
          script: |
            const caches = await github.rest.actions.getActionsCacheList({
              owner: context.repo.owner,
              repo: context.repo.repo,
            })
            for (const cache of caches.data.actions_caches) {
              console.log(cache)
              await github.rest.actions.deleteActionsCacheById({
                owner: context.repo.owner,
                repo: context.repo.repo,
                cache_id: cache.id,
              })
            }
open_clip/.github/workflows/python-publish.yml
DELETED
@@ -1,37 +0,0 @@
name: Release

on:
  push:
    branches:
      - main
jobs:
  deploy:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - uses: actions-ecosystem/action-regex-match@v2
        id: regex-match
        with:
          text: ${{ github.event.head_commit.message }}
          regex: '^Release ([^ ]+)'
      - name: Set up Python
        uses: actions/setup-python@v2
        with:
          python-version: '3.8'
      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install setuptools wheel twine
      - name: Release
        if: ${{ steps.regex-match.outputs.match != '' }}
        uses: softprops/action-gh-release@v1
        with:
          tag_name: v${{ steps.regex-match.outputs.group1 }}
      - name: Build and publish
        if: ${{ steps.regex-match.outputs.match != '' }}
        env:
          TWINE_USERNAME: __token__
          TWINE_PASSWORD: ${{ secrets.PYPI_PASSWORD }}
        run: |
          python setup.py sdist bdist_wheel
          twine upload dist/*
open_clip/.gitignore
DELETED
@@ -1,153 +0,0 @@
logs/
wandb/
models/
features/
results/

tests/data/
*.pt

# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
pip-wheel-metadata/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
target/

# Jupyter Notebook
.ipynb_checkpoints

# IPython
profile_default/
ipython_config.py

# pyenv
.python-version

# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock

# PEP 582; used by e.g. github.com/David-OConnor/pyflow
__pypackages__/

# Celery stuff
celerybeat-schedule
celerybeat.pid

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Pyre type checker
.pyre/
sync.sh
gpu1sync.sh
.idea
*.pdf
**/._*
**/*DS_*
**.jsonl
src/sbatch
src/misc
.vscode
src/debug
core.*

# Allow
!src/evaluation/misc/results_dbs/*
open_clip/CITATION.cff
DELETED
@@ -1,33 +0,0 @@
cff-version: 1.1.0
message: If you use this software, please cite it as below.
authors:
- family-names: Ilharco
  given-names: Gabriel
- family-names: Wortsman
  given-names: Mitchell
- family-names: Wightman
  given-names: Ross
- family-names: Gordon
  given-names: Cade
- family-names: Carlini
  given-names: Nicholas
- family-names: Taori
  given-names: Rohan
- family-names: Dave
  given-names: Achal
- family-names: Shankar
  given-names: Vaishaal
- family-names: Namkoong
  given-names: Hongseok
- family-names: Miller
  given-names: John
- family-names: Hajishirzi
  given-names: Hannaneh
- family-names: Farhadi
  given-names: Ali
- family-names: Schmidt
  given-names: Ludwig
title: OpenCLIP
version: v0.1
doi: 10.5281/zenodo.5143773
date-released: 2021-07-28
open_clip/HISTORY.md
DELETED
@@ -1,176 +0,0 @@
## 2.18.0

* Enable int8 inference without `.weight` attribute

## 2.17.2

* Update push_to_hf_hub

## 2.17.0

* Add int8 support
* Update notebook demo
* Refactor zero-shot classification code

## 2.16.2

* Fixes for context_length and vocab_size attributes

## 2.16.1

* Fixes for context_length and vocab_size attributes
* Fix --train-num-samples logic
* Add HF BERT configs for PubMed CLIP model

## 2.16.0

* Add improved g-14 weights
* Update protobuf version

## 2.15.0

* Add convnext_xxlarge weights
* Fixed import in readme
* Add samples per second per gpu logging
* Fix slurm example

## 2.14.0

* Move dataset mixtures logic to shard level
* Fix CoCa accum-grad training
* Safer transformers import guard
* get_labels refactoring

## 2.13.0

* Add support for dataset mixtures with different sampling weights
* Make transformers optional again

## 2.12.0

* Updated convnext configs for consistency
* Added input_patchnorm option
* Clean and improve CoCa generation
* Support model distillation
* Add ConvNeXt-Large 320x320 fine-tune weights

## 2.11.1

* Make transformers optional
* Add MSCOCO CoCa finetunes to pretrained models

## 2.11.0

* coca support and weights
* ConvNeXt-Large weights

## 2.10.1

* `hf-hub:org/model_id` support for loading models w/ config and weights in Hugging Face Hub

## 2.10.0

* Added a ViT-bigG-14 model.
* Added an up-to-date example slurm script for large training jobs.
* Added an option to sync logs and checkpoints to S3 during training.
* New options for LR schedulers, constant and constant with cooldown
* Fix wandb autoresuming when resume is not set
* ConvNeXt `base` & `base_w` pretrained models added
* `timm-` model prefix removed from configs
* `timm` augmentation + regularization (dropout / drop-path) supported

## 2.9.3

* Fix wandb collapsing multiple parallel runs into a single one

## 2.9.2

* Fix braceexpand memory explosion for complex webdataset urls

## 2.9.1

* Fix release

## 2.9.0

* Add training feature to auto-resume from the latest checkpoint on restart via `--resume latest`
* Allow webp in webdataset
* Fix logging for number of samples when using gradient accumulation
* Add model configs for convnext xxlarge

## 2.8.2

* wrapped patchdropout in a torch.nn.Module

## 2.8.1

* relax protobuf dependency
* override the default patch dropout value in 'vision_cfg'

## 2.8.0

* better support for HF models
* add support for gradient accumulation
* CI fixes
* add support for patch dropout
* add convnext configs

## 2.7.0

* add multilingual H/14 xlm roberta large

## 2.6.1

* fix setup.py _read_reqs

## 2.6.0

* Make openclip training usable from pypi.
* Add xlm roberta large vit h 14 config.

## 2.5.0

* pretrained B/32 xlm roberta base: first multilingual clip trained on laion5B
* pretrained B/32 roberta base: first clip trained using an HF text encoder

## 2.4.1

* Add missing hf_tokenizer_name in CLIPTextCfg.

## 2.4.0

* Fix #211, missing RN50x64 config. Fix type of dropout param for ResNet models
* Bring back LayerNorm impl that casts to input for non bf16/fp16
* zero_shot.py: set correct tokenizer based on args
* training/params.py: remove hf params and get them from model config

## 2.3.1

* Implement grad checkpointing for hf model.
* custom_text: True if hf_model_name is set
* Disable hf tokenizer parallelism

## 2.3.0

* Generalizable Text Transformer with HuggingFace Models (@iejMac)

## 2.2.0

* Support for custom text tower
* Add checksum verification for pretrained model weights

## 2.1.0

* A lot, including SOTA models, a bfloat16 option, better loading, better metrics

## 1.2.0

* ViT-B/32 trained on Laion2B-en
* add missing openai RN50x64 model

## 1.1.1

* ViT-B/16+
* Add grad checkpointing support
* more robust data loader
open_clip/LICENSE
DELETED
@@ -1,23 +0,0 @@
Copyright (c) 2012-2021 Gabriel Ilharco, Mitchell Wortsman,
Nicholas Carlini, Rohan Taori, Achal Dave, Vaishaal Shankar,
John Miller, Hongseok Namkoong, Hannaneh Hajishirzi, Ali Farhadi,
Ludwig Schmidt

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:

The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
open_clip/MANIFEST.in
DELETED
@@ -1,3 +0,0 @@
include src/open_clip/bpe_simple_vocab_16e6.txt.gz
include src/open_clip/model_configs/*.json
open_clip/Makefile
DELETED
@@ -1,12 +0,0 @@
install: ## [Local development] Upgrade pip, install requirements, install package.
	python -m pip install -U pip
	python -m pip install -e .

install-training:
	python -m pip install -r requirements-training.txt

install-test: ## [Local development] Install test requirements
	python -m pip install -r requirements-test.txt

test: ## [Local development] Run unit tests
	python -m pytest -x -s -v tests
open_clip/README.md
DELETED
@@ -1,798 +0,0 @@
# OpenCLIP

[[Paper]](https://arxiv.org/abs/2212.07143) [[Clip Colab]](https://colab.research.google.com/github/mlfoundations/open_clip/blob/master/docs/Interacting_with_open_clip.ipynb) [[Coca Colab]](https://colab.research.google.com/github/mlfoundations/open_clip/blob/master/docs/Interacting_with_open_coca.ipynb)
[PyPI](https://pypi.python.org/pypi/open_clip_torch)

Welcome to an open source implementation of OpenAI's [CLIP](https://arxiv.org/abs/2103.00020) (Contrastive Language-Image Pre-training).

The goal of this repository is to enable training models with contrastive image-text supervision, and to investigate their properties such as robustness to distribution shift. Our starting point is an implementation of CLIP that matches the accuracy of the original CLIP models when trained on the same dataset.
Specifically, a ResNet-50 model trained with our codebase on OpenAI's [15 million image subset of YFCC](https://github.com/openai/CLIP/blob/main/data/yfcc100m.md) achieves **32.7%** top-1 accuracy on ImageNet. OpenAI's CLIP model reaches **31.3%** when trained on the same subset of YFCC. For ease of experimentation, we also provide code for training on the 3 million images in the [Conceptual Captions](https://ai.google.com/research/ConceptualCaptions/download) dataset, where a ResNet-50x4 trained with our codebase reaches 22.2% top-1 ImageNet accuracy.

We further this with a replication study on a dataset of comparable size to OpenAI's, [LAION-400M](https://arxiv.org/abs/2111.02114), and with the larger [LAION-2B](https://laion.ai/blog/laion-5b/) superset. In addition, we study scaling behavior in a paper on [reproducible scaling laws for contrastive language-image learning](https://arxiv.org/abs/2212.07143).

We have trained the following ViT CLIP models:
* ViT-B/32 on LAION-400M with an accuracy of **62.9%**, comparable to OpenAI's **63.2%**, zero-shot top-1 on ImageNet-1k
* ViT-B/32 on LAION-2B with an accuracy of **66.6%**.
* ViT-B/16 on LAION-400M achieving an accuracy of **67.1%**, lower than OpenAI's **68.3%** (as measured here, 68.6% in paper)
* ViT-B/16+ 240x240 (~50% more FLOPS than B/16 224x224) on LAION-400M achieving an accuracy of **69.2%**
* ViT-B/16 on LAION-2B with an accuracy of **70.2%**.
* ViT-L/14 on LAION-400M with an accuracy of **72.77%**, vs OpenAI's **75.5%** (as measured here, 75.3% in paper)
* ViT-L/14 on LAION-2B with an accuracy of **75.3%**, vs OpenAI's **75.5%** (as measured here, 75.3% in paper)
* CoCa ViT-L/14 on LAION-2B with an accuracy of **75.5%** (currently only 13B samples seen) vs. CLIP ViT-L/14 73.1% (on the same dataset and samples seen)
* ViT-H/14 on LAION-2B with an accuracy of **78.0%**. The second-best ImageNet-1k zero-shot for released, open-source weights thus far.
* ViT-g/14 on LAION-2B with an accuracy of **76.6%**. This was trained on a reduced 12B samples seen schedule, the same samples seen as the 400M models.
* ViT-g/14 on LAION-2B with an accuracy of **78.5%**. Full 34B samples seen schedule.
* ViT-G/14 on LAION-2B with an accuracy of **80.1%**. The best ImageNet-1k zero-shot for released, open-source weights thus far.

And the following ConvNeXt CLIP models:
* ConvNext-Base @ 224x224 on LAION-400M with an ImageNet-1k zero-shot top-1 of **66.3%**
* ConvNext-Base (W) @ 256x256 on LAION-2B with an ImageNet-1k zero-shot top-1 of **70.8%**
* ConvNext-Base (W) @ 256x256 w/ augreg (extra augmentation + regularization) on LAION-2B with a top-1 of **71.5%**
* ConvNext-Base (W) @ 256x256 on LAION-A (900M sample aesthetic subset of 2B) with a top-1 of **71.0%**
* ConvNext-Base (W) @ 320x320 on LAION-A with a top-1 of **71.7%** (eval at 384x384 is **71.0%**)
* ConvNext-Base (W) @ 320x320 w/ augreg on LAION-A with a top-1 of **71.3%** (eval at 384x384 is **72.2%**)
* ConvNext-Large (D) @ 256x256 w/ augreg on LAION-2B with a top-1 of **75.9%**
* ConvNext-Large (D) @ 320x320 fine-tune of the 256x256 weights above for ~2.5B more samples on LAION-2B, top-1 of **76.6%**
* ConvNext-Large (D) @ 320x320 soup of 3 fine-tunes of the 256x256 weights above on LAION-2B, top-1 of **76.9%**
* ConvNext-XXLarge @ 256x256 original run **79.1%**
* ConvNext-XXLarge @ 256x256 rewind of last 10% **79.3%**
* ConvNext-XXLarge @ 256x256 soup of original + rewind **79.4%**

Model cards w/ additional model specific details can be found on the Hugging Face Hub under the OpenCLIP library tag: https://huggingface.co/models?library=open_clip

As we describe in more detail [below](#why-are-low-accuracy-clip-models-interesting), CLIP models in a medium accuracy regime already allow us to draw conclusions about the robustness of larger CLIP models, since the models follow [reliable scaling laws](https://arxiv.org/abs/2107.04649).

This codebase is a work in progress, and we invite all to contribute to making it more accessible and useful. In the future, we plan to add support for TPU training and release larger models. We hope this codebase facilitates and promotes further research in contrastive image-text learning. Please submit an issue or send an email if you have any other requests or suggestions.

Note that portions of `src/open_clip/` modelling and tokenizer code are adaptations of OpenAI's official [repository](https://github.com/openai/CLIP).

## Approach

| ![CLIP](docs/CLIP.png) |
|:--:|
| Image Credit: https://github.com/openai/CLIP |

## Usage

```
pip install open_clip_torch
```

```python
import torch
from PIL import Image
import open_clip

model, _, preprocess = open_clip.create_model_and_transforms('ViT-B-32', pretrained='laion2b_s34b_b79k')
tokenizer = open_clip.get_tokenizer('ViT-B-32')

image = preprocess(Image.open("CLIP.png")).unsqueeze(0)
text = tokenizer(["a diagram", "a dog", "a cat"])

with torch.no_grad(), torch.cuda.amp.autocast():
    image_features = model.encode_image(image)
    text_features = model.encode_text(text)
    image_features /= image_features.norm(dim=-1, keepdim=True)
    text_features /= text_features.norm(dim=-1, keepdim=True)

    text_probs = (100.0 * image_features @ text_features.T).softmax(dim=-1)

print("Label probs:", text_probs)  # prints: [[1., 0., 0.]]
```
See also this [[Clip Colab]](https://colab.research.google.com/github/mlfoundations/open_clip/blob/master/docs/Interacting_with_open_clip.ipynb)

To compute billions of embeddings efficiently, you can use [clip-retrieval](https://github.com/rom1504/clip-retrieval), which has openclip support.

## Fine-tuning on classification tasks

This repository is focused on training CLIP models. To fine-tune a *trained* zero-shot model on a downstream classification task such as ImageNet, please see [our other repository: WiSE-FT](https://github.com/mlfoundations/wise-ft). The [WiSE-FT repository](https://github.com/mlfoundations/wise-ft) contains code for our paper on [Robust Fine-tuning of Zero-shot Models](https://arxiv.org/abs/2109.01903), in which we introduce a technique for fine-tuning zero-shot models while preserving robustness under distribution shift.

## Data

To download datasets as webdataset, we recommend [img2dataset](https://github.com/rom1504/img2dataset).

### Conceptual Captions

See [cc3m img2dataset example](https://github.com/rom1504/img2dataset/blob/main/dataset_examples/cc3m.md)

### YFCC and other datasets

In addition to specifying the training data via CSV files as mentioned above, our codebase also supports [webdataset](https://github.com/webdataset/webdataset), which is recommended for larger scale datasets. The expected format is a series of `.tar` files. Each of these `.tar` files should contain two files for each training example, one for the image and one for the corresponding text. Both files should have the same name but different extensions. For instance, `shard_001.tar` could contain files such as `abc.jpg` and `abc.txt`. You can learn more about `webdataset` at [https://github.com/webdataset/webdataset](https://github.com/webdataset/webdataset). We use `.tar` files with 1,000 data points each, which we create using [tarp](https://github.com/webdataset/tarp).
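As an illustrative sketch (not part of open_clip; the sample data below is a hypothetical placeholder), such a shard can be packed with Python's standard `tarfile` module:

```python
# Hypothetical example: pack paired image/caption samples into one
# webdataset-style shard, where each sample shares a basename across extensions.
import io
import tarfile

samples = [("abc", b"...jpeg bytes...", "a photo of a cat")]  # placeholder data

with tarfile.open("shard_001.tar", "w") as tar:
    for key, image_bytes, caption in samples:
        for name, payload in ((f"{key}.jpg", image_bytes),
                              (f"{key}.txt", caption.encode("utf-8"))):
            info = tarfile.TarInfo(name=name)
            info.size = len(payload)
            tar.addfile(info, io.BytesIO(payload))
```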
You can download the YFCC dataset from [Multimedia Commons](http://mmcommons.org/).
Similar to OpenAI, we used a subset of YFCC to reach the aforementioned accuracy numbers.
The indices of images in this subset are in [OpenAI's CLIP repository](https://github.com/openai/CLIP/blob/main/data/yfcc100m.md).

## Training CLIP

### Install

We advise you to first create a virtual environment with:

```
python3 -m venv .env
source .env/bin/activate
pip install -U pip
```

You can then install openclip for training with `pip install 'open_clip_torch[training]'`.

#### Development

If you want to make changes to contribute code, you can clone openclip, then run `make install` in the openclip folder (after creating a virtualenv).

Install PyTorch via pip as per https://pytorch.org/get-started/locally/

You may run `make install-training` to install training dependencies.

#### Testing

Tests can be run with `make install-test` then `make test`

`python -m pytest -x -s -v tests -k "training"` to run a specific test

Running regression tests against a specific git revision or tag:
1. Generate testing data
```sh
python tests/util_test.py --model RN50 RN101 --save_model_list models.txt --git_revision 9d31b2ec4df6d8228f370ff20c8267ec6ba39383
```
**_WARNING_: This will invoke git and modify your working tree, but will reset it to the current state after data has been generated! \
Don't modify your working tree while test data is being generated this way.**

2. Run regression tests
```sh
OPEN_CLIP_TEST_REG_MODELS=models.txt python -m pytest -x -s -v -m regression_test
```

### Sample single-process running code:

```bash
python -m training.main \
    --save-frequency 1 \
    --zeroshot-frequency 1 \
    --report-to tensorboard \
    --train-data="/path/to/train_data.csv" \
    --val-data="/path/to/validation_data.csv" \
    --csv-img-key filepath \
    --csv-caption-key title \
    --imagenet-val=/path/to/imagenet/root/val/ \
    --warmup 10000 \
    --batch-size=128 \
    --lr=1e-3 \
    --wd=0.1 \
    --epochs=30 \
    --workers=8 \
    --model RN50
```

Note: `imagenet-val` is the path to the *validation* set of ImageNet for zero-shot evaluation, not the training set!
You can remove this argument if you do not want to perform zero-shot evaluation on ImageNet throughout training. Note that the `val` folder should contain subfolders. If it does not, please use [this script](https://raw.githubusercontent.com/soumith/imagenetloader.torch/master/valprep.sh).

### Multi-GPU and Beyond

This code has been battle tested up to 1024 A100s and offers a variety of solutions
for distributed training. We include native support for SLURM clusters.

As the number of devices used to train increases, so does the space complexity of
the logit matrix. Using a naïve all-gather scheme, space complexity will be
`O(n^2)`. Instead, complexity may become effectively linear if the flags
`--gather-with-grad` and `--local-loss` are used. This alteration produces numerical results identical to the naïve method.
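To make the memory argument concrete, here is a minimal sketch of the local-loss idea, assuming equal per-rank batch sizes and `all_*_features` produced by an all-gather across ranks (a simplification, not the exact open_clip implementation): each rank scores only its local batch against the gathered features, so the per-rank logit matrix is `n_local x n_global` rather than `n_global x n_global`.

```python
# Simplified sketch of a local contrastive (CLIP) loss across ranks.
import torch
import torch.nn.functional as F

def local_clip_loss(image_features, text_features,
                    all_image_features, all_text_features,
                    logit_scale, rank):
    n_local = image_features.shape[0]
    # Each local sample's positive pair sits at offset rank * n_local
    # within the gathered global batch.
    labels = torch.arange(n_local, device=image_features.device) + rank * n_local
    logits_per_image = logit_scale * image_features @ all_text_features.T
    logits_per_text = logit_scale * text_features @ all_image_features.T
    return (F.cross_entropy(logits_per_image, labels) +
            F.cross_entropy(logits_per_text, labels)) / 2
```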
#### Epochs

For larger datasets (e.g. LAION-2B), we recommend setting `--train-num-samples` to a value lower than the full epoch, for example `--train-num-samples 135646078` (roughly 1/16 of an epoch), in conjunction with `--dataset-resampled` to do sampling with replacement. This allows having frequent checkpoints to evaluate more often, as sketched below.
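A hedged sketch of how these flags combine (the shard path is a hypothetical placeholder, and the remaining flags are drawn from the examples later in this README):

```bash
# 135646078 samples is roughly 1/16 of a ~2.17B-sample epoch, so checkpoints
# are written 16x more often than a full pass over the data.
python -m training.main \
    --train-data '/data/LAION-2B/{00000..23295}.tar' \
    --dataset-type webdataset \
    --dataset-resampled \
    --train-num-samples 135646078 \
    --batch-size 256 \
    --workers 8 \
    --model ViT-B-32
```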
#### Patch Dropout

<a href="https://arxiv.org/abs/2212.00794">Recent research</a> has shown that one can drop out half to three-quarters of the visual tokens, leading to up to 2-3x training speedups without loss of accuracy.

You can set this on your visual transformer config with the key `patch_dropout`.

In the paper, they also finetuned without the patch dropout at the end. You can do this with the command-line argument `--force-patch-dropout 0.`
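For illustration, a vision config enabling patch dropout might look like the snippet below; the surrounding field names follow the model-config convention shown elsewhere in this README, and the value 0.5 (drop half the visual tokens) is an arbitrary example:

```json
"vision_cfg": {
    "image_size": 224,
    "layers": 12,
    "width": 768,
    "patch_size": 32,
    "patch_dropout": 0.5
}
```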
|
| 194 |
-
|
| 195 |
-
#### Multiple data sources
|
| 196 |
-
|
| 197 |
-
OpenCLIP supports using multiple data sources, by separating different data paths with `::`.
|
| 198 |
-
For instance, to train on CC12M and on LAION, one might use `--train-data '/data/cc12m/cc12m-train-{0000..2175}.tar'::/data/LAION-400M/{00000..41455}.tar"`.
|
| 199 |
-
Using `--dataset-resampled` is recommended for these cases.
|
| 200 |
-
|
| 201 |
-
By default, on expectation the amount of times the model will see a sample from each source is proportional to the size of the source.
|
| 202 |
-
For instance, when training on one data source with size 400M and one with size 10M, samples from the first source are 40x more likely to be seen in expectation.
|
| 203 |
-
|
| 204 |
-
We also support different weighting of the data sources, by using the `--train-data-upsampling-factors` flag.
|
| 205 |
-
For instance, using `--train-data-upsampling-factors=1::1` in the above scenario is equivalent to not using the flag, and `--train-data-upsampling-factors=1::2` is equivalent to upsampling the second data source twice.
|
| 206 |
-
If you want to sample from data sources with the same frequency, the upsampling factors should be inversely proportional to the sizes of the data sources.
|
| 207 |
-
For instance, if dataset `A` has 1000 samples and dataset `B` has 100 samples, you can use `--train-data-upsampling-factors=0.001::0.01` (or analogously, `--train-data-upsampling-factors=1::10`).
|
| 208 |
-
|
| 209 |
-
#### Single-Node
|
| 210 |
-
|
| 211 |
-
We make use of `torchrun` to launch distributed jobs. The following launches a
|
| 212 |
-
a job on a node of 4 GPUs:
|
| 213 |
-
|
| 214 |
-
```bash
|
| 215 |
-
cd open_clip/src
|
| 216 |
-
torchrun --nproc_per_node 4 -m training.main \
|
| 217 |
-
--train-data '/data/cc12m/cc12m-train-{0000..2175}.tar' \
|
| 218 |
-
--train-num-samples 10968539 \
|
| 219 |
-
--dataset-type webdataset \
|
| 220 |
-
--batch-size 320 \
|
| 221 |
-
--precision amp \
|
| 222 |
-
--workers 4 \
|
| 223 |
-
--imagenet-val /data/imagenet/validation/
|
| 224 |
-
```
|
| 225 |
-
|
| 226 |
-
#### Multi-Node
|
| 227 |
-
|
| 228 |
-
The same script above works, so long as users include information about the number
|
| 229 |
-
of nodes and host node.
|
| 230 |
-
|
| 231 |
-
```bash
|
| 232 |
-
cd open_clip/src
|
| 233 |
-
torchrun --nproc_per_node=4 \
|
| 234 |
-
--rdzv_endpoint=$HOSTE_NODE_ADDR \
|
| 235 |
-
-m training.main \
|
| 236 |
-
--train-data '/data/cc12m/cc12m-train-{0000..2175}.tar' \
|
| 237 |
-
--train-num-samples 10968539 \
|
| 238 |
-
--dataset-type webdataset \
|
| 239 |
-
--batch-size 320 \
|
| 240 |
-
--precision amp \
|
| 241 |
-
--workers 4 \
|
| 242 |
-
--imagenet-val /data/imagenet/validation/
|
| 243 |
-
```
|
| 244 |
-
|
| 245 |
-
#### SLURM
|
| 246 |
-
|
| 247 |
-
This is likely the easiest solution to utilize. The following script was used to
|
| 248 |
-
train our largest models:
|
| 249 |
-
|
| 250 |
-
```bash
|
| 251 |
-
#!/bin/bash -x
|
| 252 |
-
#SBATCH --nodes=32
|
| 253 |
-
#SBATCH --gres=gpu:4
|
| 254 |
-
#SBATCH --ntasks-per-node=4
|
| 255 |
-
#SBATCH --cpus-per-task=6
|
| 256 |
-
#SBATCH --wait-all-nodes=1
|
| 257 |
-
#SBATCH --job-name=open_clip
|
| 258 |
-
#SBATCH --account=ACCOUNT_NAME
|
| 259 |
-
#SBATCH --partition PARTITION_NAME
|
| 260 |
-
|
| 261 |
-
eval "$(/path/to/conda/bin/conda shell.bash hook)" # init conda
|
| 262 |
-
conda activate open_clip
|
| 263 |
-
export CUDA_VISIBLE_DEVICES=0,1,2,3
|
| 264 |
-
export MASTER_PORT=12802
|
| 265 |
-
|
| 266 |
-
master_addr=$(scontrol show hostnames "$SLURM_JOB_NODELIST" | head -n 1)
|
| 267 |
-
export MASTER_ADDR=$master_addr
|
| 268 |
-
|
| 269 |
-
cd /shared/open_clip
|
| 270 |
-
export PYTHONPATH="$PYTHONPATH:$PWD/src"
|
| 271 |
-
srun --cpu_bind=v --accel-bind=gn python -u src/training/main.py \
|
| 272 |
-
--save-frequency 1 \
|
| 273 |
-
--report-to tensorboard \
|
| 274 |
-
--train-data="/data/LAION-400M/{00000..41455}.tar" \
|
| 275 |
-
--warmup 2000 \
|
| 276 |
-
--batch-size=256 \
|
| 277 |
-
--epochs=32 \
|
| 278 |
-
--workers=8 \
|
| 279 |
-
--model ViT-B-32 \
|
| 280 |
-
--name "ViT-B-32-Vanilla" \
|
| 281 |
-
--seed 0 \
|
| 282 |
-
--local-loss \
|
| 283 |
-
--gather-with-grad
|
| 284 |
-
```
|
| 285 |
-
|
| 286 |
-
### Resuming from a checkpoint:
|
| 287 |
-
|
| 288 |
-
```bash
|
| 289 |
-
python -m training.main \
|
| 290 |
-
--train-data="/path/to/train_data.csv" \
|
| 291 |
-
--val-data="/path/to/validation_data.csv" \
|
| 292 |
-
--resume /path/to/checkpoints/epoch_K.pt
|
| 293 |
-
```
|
| 294 |
-
|
| 295 |
-
### Training CoCa:
|
| 296 |
-
Training [CoCa](https://arxiv.org/abs/2205.01917) models is enabled through specifying a CoCa config using the ```--model``` parameter of the training script. Currently available configs are "coca_base", "coca_ViT-B-32", and "coca_roberta-ViT-B-32" (which uses RoBERTa as the text encoder). CoCa configs are different from CLIP configs because they have an additional "multimodal_cfg" component which specifies parameters for the multimodal text decoder. Here's an example from the coca_ViT-B-32 config:
|
| 297 |
-
```json
|
| 298 |
-
"multimodal_cfg": {
|
| 299 |
-
"context_length": 76,
|
| 300 |
-
"vocab_size": 49408,
|
| 301 |
-
"width": 512,
|
| 302 |
-
"heads": 8,
|
| 303 |
-
"layers": 12,
|
| 304 |
-
"latent_dim": 512,
|
| 305 |
-
"attn_pooler_heads": 8
|
| 306 |
-
}
|
| 307 |
-
```
|
| 308 |
-
Credit to [lucidrains](https://github.com/lucidrains) for [initial code](https://github.com/lucidrains/CoCa-pytorch), [gpucce](https://github.com/gpucce) for adapting the code to open_clip, and [iejMac](https://github.com/iejMac) for training the models.
|
| 309 |
-
|
| 310 |
-
### Generating text with CoCa
|
| 311 |
-
|
| 312 |
-
```python
|
| 313 |
-
import open_clip
|
| 314 |
-
import torch
|
| 315 |
-
from PIL import Image
|
| 316 |
-
|
| 317 |
-
model, _, transform = open_clip.create_model_and_transforms(
|
| 318 |
-
model_name="coca_ViT-L-14",
|
| 319 |
-
pretrained="mscoco_finetuned_laion2B-s13B-b90k"
|
| 320 |
-
)
|
| 321 |
-
|
| 322 |
-
im = Image.open("cat.jpg").convert("RGB")
|
| 323 |
-
im = transform(im).unsqueeze(0)
|
| 324 |
-
|
| 325 |
-
with torch.no_grad(), torch.cuda.amp.autocast():
|
| 326 |
-
generated = model.generate(im)
|
| 327 |
-
|
| 328 |
-
print(open_clip.decode(generated[0]).split("<end_of_text>")[0].replace("<start_of_text>", ""))
|
| 329 |
-
```
|
| 330 |
-
|
| 331 |
-
See also this [[Coca Colab]](https://colab.research.google.com/github/mlfoundations/open_clip/blob/master/docs/Interacting_with_open_coca.ipynb)
|
| 332 |
-
|
| 333 |
-
### Fine Tuning CoCa
|
| 334 |
-
|
| 335 |
-
To fine-tune coca on mscoco, first create the dataset, one way is using a csvdataset and perhaps the simplest way to do it is using [CLIP_benchmark](https://github.com/LAION-AI/CLIP_benchmark) which in turn uses [pycocotools](https://github.com/cocodataset/cocoapi) (that can be used also by itself).
|
| 336 |
-
|
| 337 |
-
```python
|
| 338 |
-
from clip_benchmark.datasets.builder import build_dataset
|
| 339 |
-
import pandas as pd
|
| 340 |
-
import os
|
| 341 |
-
|
| 342 |
-
root_path = "path/to/data/dir" # set this to smth meaningful
|
| 343 |
-
ds = build_dataset("mscoco_captions", root=root_path, split="train") # this downloads the dataset if it is not there already
|
| 344 |
-
coco = ds.coco
|
| 345 |
-
imgs = coco.loadImgs(coco.getImgIds())
|
| 346 |
-
future_df = {"filepath":[], "title":[]}
|
| 347 |
-
for img in imgs:
|
| 348 |
-
caps = coco.imgToAnns[img["id"]]
|
| 349 |
-
for cap in caps:
|
| 350 |
-
future_df["filepath"].append(img["file_name"])
|
| 351 |
-
future_df["title"].append(cap["caption"])
|
| 352 |
-
pd.DataFrame.from_dict(future_df).to_csv(
|
| 353 |
-
os.path.join(root_path, "train2014.csv"), index=False, sep="\t"
|
| 354 |
-
)
|
| 355 |
-
```
|
| 356 |
-
This should create a csv dataset that one can use to fine-tune coca with open_clip
|
| 357 |
-
```bash
|
| 358 |
-
python -m training.main \
|
| 359 |
-
--dataset-type "csv" \
|
| 360 |
-
--train-data "path/to/data/dir/train2014.csv" \
|
| 361 |
-
--warmup 1000 \
|
| 362 |
-
--batch-size 128 \
|
| 363 |
-
--lr 1e-5 \
|
| 364 |
-
--wd 0.1 \
|
| 365 |
-
--epochs 1 \
|
| 366 |
-
--workers 3 \
|
| 367 |
-
--model "coca_ViT-L-14" \
|
| 368 |
-
--report-to "wandb" \
|
| 369 |
-
--coca-contrastive-loss-weight 0 \
|
| 370 |
-
--coca-caption-loss-weight 1 \
|
| 371 |
-
--log-every-n-steps 100
|
| 372 |
-
```
|
| 373 |
-
|
| 374 |
-
This is a general setting, open_clip has very parameters that can be set, ```python -m training.main --help``` should show them. The only relevant change compared to pre-training are the two arguments
|
| 375 |
-
|
| 376 |
-
```bash
|
| 377 |
-
--coca-contrastive-loss-weight 0
|
| 378 |
-
--coca-caption-loss-weight 1
|
| 379 |
-
```
|
| 380 |
-
which make the model only train the generative side.
|
| 381 |
-
|
| 382 |
-
### Training with pre-trained language models as text encoder:
|
| 383 |
-
|
| 384 |
-
If you wish to use different language models as the text encoder for CLIP you can do so by using one of the Hugging Face model configs in ```src/open_clip/model_configs``` and passing in it's tokenizer as the ```--model``` and ```--hf-tokenizer-name``` parameters respectively. Currently we only support RoBERTa ("test-roberta" config), however adding new models should be trivial. You can also determine how many layers, from the end, to leave unfrozen with the ```--lock-text-unlocked-layers``` parameter. Here's an example command to train CLIP with the RoBERTa LM that has it's last 10 layers unfrozen:
|
| 385 |
-
```bash
|
| 386 |
-
python -m training.main \
|
| 387 |
-
--train-data="pipe:aws s3 cp s3://s-mas/cc3m/{00000..00329}.tar -" \
|
| 388 |
-
--train-num-samples 3000000 \
|
| 389 |
-
--val-data="pipe:aws s3 cp s3://s-mas/cc3m/{00330..00331}.tar -" \
|
| 390 |
-
--val-num-samples 10000 \
|
| 391 |
-
--dataset-type webdataset \
|
| 392 |
-
--batch-size 256 \
|
| 393 |
-
--warmup 2000 \
|
| 394 |
-
--epochs 10 \
|
| 395 |
-
--lr 5e-4 \
|
| 396 |
-
--precision amp \
|
| 397 |
-
--workers 6 \
|
| 398 |
-
--model "roberta-ViT-B-32" \
|
| 399 |
-
--lock-text \
|
| 400 |
-
--lock-text-unlocked-layers 10 \
|
| 401 |
-
--name "10_unfrozen" \
|
| 402 |
-
--report-to "tensorboard" \
|
| 403 |
-
```
|
| 404 |
-
|
| 405 |
-
### Loss Curves
|
| 406 |
-
|
| 407 |
-
When run on a machine with 8 GPUs the command should produce the following training curve for Conceptual Captions:
|
| 408 |
-
|
| 409 |
-

|
| 410 |
-
|
| 411 |
-
More detailed curves for Conceptual Captions are given at [/docs/clip_conceptual_captions.md](/docs/clip_conceptual_captions.md).
|
| 412 |
-
|
| 413 |
-
When training a RN50 on YFCC the same hyperparameters as above are used, with the exception of `lr=5e-4` and `epochs=32`.
|
| 414 |
-
|
| 415 |
-
Note that to use another model, like `ViT-B/32` or `RN50x4` or `RN50x16` or `ViT-B/16`, specify with `--model RN50x4`.
|
| 416 |
-
|
| 417 |
-
### Launch tensorboard:
|
| 418 |
-
```bash
|
| 419 |
-
tensorboard --logdir=logs/tensorboard/ --port=7777
|
| 420 |
-
```
|
| 421 |
-
|
| 422 |
-
## Evaluation / Zero-Shot
|
| 423 |
-
|
| 424 |
-
We recommend https://github.com/LAION-AI/CLIP_benchmark#how-to-use for systematic evaluation on 40 datasets.
|
| 425 |
-
|
| 426 |
-
### Evaluating local checkpoint:
|
| 427 |
-
|
| 428 |
-
```bash
|
| 429 |
-
python -m training.main \
|
| 430 |
-
--val-data="/path/to/validation_data.csv" \
|
| 431 |
-
--model RN101 \
|
| 432 |
-
--pretrained /path/to/checkpoints/epoch_K.pt
|
| 433 |
-
```
|
| 434 |
-
|
| 435 |
-
### Evaluating hosted pretrained checkpoint on ImageNet zero-shot prediction:
|
| 436 |
-
|
| 437 |
-
```bash
|
| 438 |
-
python -m training.main \
|
| 439 |
-
--imagenet-val /path/to/imagenet/validation \
|
| 440 |
-
--model ViT-B-32-quickgelu \
|
| 441 |
-
--pretrained laion400m_e32
|
| 442 |
-
```
|
| 443 |
-
|
| 444 |
-
## Pretrained model details
|
| 445 |
-
|
| 446 |
-
### LAION-400M - https://laion.ai/laion-400-open-dataset
|
| 447 |
-
|
| 448 |
-
We are working on reproducing OpenAI's ViT results with the comparably sized (and open) LAION-400M dataset. Trained
|
| 449 |
-
weights may be found in release [v0.2](https://github.com/mlfoundations/open_clip/releases/tag/v0.2-weights).
|
| 450 |
-
|
| 451 |
-
The LAION400M weights have been trained on the JUWELS supercomputer (see acknowledgements section below).
|
| 452 |
-
|
| 453 |
-
#### ViT-B/32 224x224
|
| 454 |
-
|
| 455 |
-
We replicate OpenAI's results on ViT-B/32, reaching a top-1 ImageNet-1k zero-shot accuracy of 62.96%.
|
| 456 |
-
|
| 457 |
-
<img src="https://raw.githubusercontent.com/mlfoundations/open_clip/main/docs/laion_clip_zeroshot.png" width="700">
|
| 458 |
-
|
| 459 |
-
__Zero-shot comparison (courtesy of Andreas Fürst)__
|
| 460 |
-
<img src="https://raw.githubusercontent.com/mlfoundations/open_clip/main/docs/laion_openai_compare_b32.jpg" width="700">
|
| 461 |
-
|
| 462 |
-
ViT-B/32 was trained with 128 A100 (40 GB) GPUs for ~36 hours, 4600 GPU-hours. The per-GPU batch size was 256 for a global batch size of 32768. 256 is much lower than it could have been (~320-384) due to being sized initially before moving to 'local' contrastive loss.
|
| 463 |
-
|
| 464 |
-
#### ViT-B/16 224x224
|
| 465 |
-
|
| 466 |
-
The B/16 LAION400M training reached a top-1 ImageNet-1k zero-shot validation score of 67.07.
|
| 467 |
-
|
| 468 |
-
<img src="https://raw.githubusercontent.com/mlfoundations/open_clip/main/docs/laion_clip_zeroshot_b16.png" width="700">
|
| 469 |
-
|
| 470 |
-
This was the first major train session using the updated webdataset 0.2.x code. A bug was found that prevented shards from being shuffled properly between nodes/workers each epoch. This was fixed part way through training (epoch 26) but likely had an impact.
|
| 471 |
-
|
| 472 |
-
ViT-B/16 was trained with 176 A100 (40 GB) GPUS for ~61 hours, 10700 GPU-hours. Batch size per GPU was 192 for a global batch size of 33792.
|
| 473 |
-
|
| 474 |
-
#### ViT-B/16+ 240x240
|
| 475 |
-
|
| 476 |
-
The B/16+ 240x240 LAION400M training reached a top-1 ImageNet-1k zero-shot validation score of 69.21.
|
| 477 |
-
|
| 478 |
-
This model is the same depth as the B/16, but increases the
|
| 479 |
-
* vision width from 768 -> 896
|
| 480 |
-
* text width from 512 -> 640
|
| 481 |
-
* the resolution 224x224 -> 240x240 (196 -> 225 tokens)
|
| 482 |
-
|
| 483 |
-
<img src="https://raw.githubusercontent.com/mlfoundations/open_clip/main/docs/laion_clip_zeroshot_b16_plus_240.png" width="700">
|
| 484 |
-
|
| 485 |
-
Unlike the B/16 run above, this model was a clean run with no dataset shuffling issues.
|
| 486 |
-
|
| 487 |
-
ViT-B/16+ was trained with 224 A100 (40 GB) GPUS for ~61 hours, 13620 GPU-hours. Batch size per GPU was 160 for a global batch size of 35840.
|
| 488 |
-
|
| 489 |
-
#### ViT-L/14 224x224
|
| 490 |
-
|
| 491 |
-
The L/14 LAION-400M training reached a top-1 ImageNet-1k zero-shot validation score of 72.77.
|
| 492 |
-
|
| 493 |
-
<img src="https://raw.githubusercontent.com/mlfoundations/open_clip/main/docs/laion_clip_zeroshot_l14.png" width="700">
|
| 494 |
-
|
| 495 |
-
ViT-L/14 was trained with 400 A100 (40 GB) GPUS for ~127 hours, 50800 GPU-hours. Batch size per GPU was 96 for a global batch size of 38400. Grad checkpointing was enabled.
|
| 496 |
-
|
| 497 |
-
### LAION-2B (en) - https://laion.ai/laion-5b-a-new-era-of-open-large-scale-multi-modal-datasets/
|
| 498 |
-
|
| 499 |
-
A ~2B sample subset of LAION-5B with english captions (https://huggingface.co/datasets/laion/laion2B-en)
|
| 500 |
-
|
| 501 |
-
#### ViT-B/32 224x224
|
| 502 |
-
A ViT-B/32 trained on LAION-2B, reaching a top-1 ImageNet-1k zero-shot accuracy of 65.62%.
|
| 503 |
-
|
| 504 |
-
<img src="https://raw.githubusercontent.com/mlfoundations/open_clip/main/docs/laion2b_clip_zeroshot_b32.png" width="700">
|
| 505 |
-
|
| 506 |
-
ViT-B/32 was trained with 112 A100 (40 GB) GPUs. The per-GPU batch size was 416 for a global batch size of 46592. Compute generously provided by [stability.ai](https://stability.ai/).
|
| 507 |
-
|
| 508 |
-
A second iteration of B/32 was trained on stability.ai cluster with a larger global batch size and learning rate, hitting 66.6% top-1. See https://huggingface.co/laion/CLIP-ViT-B-32-laion2B-s34B-b79K
|
| 509 |
-
|
| 510 |
-
#### ViT-L/14 224x224
|
| 511 |
-
|
| 512 |
-
A ViT-L/14 with a 75.3% top-1 ImageNet-1k zero-shot was trained on JUWELS Booster. See model details here https://huggingface.co/laion/CLIP-ViT-L-14-laion2B-s32B-b82K
|
| 513 |
-
|
| 514 |
-
These weights use a different dataset mean and std than others. Instead of using the OpenAI mean & std, inception style normalization `[-1, 1]` is used via a mean and std of `[0.5, 0.5, 0.5]`. This is handled automatically if using `open_clip.create_model_and_transforms` from pretrained weights.

#### ViT-H/14 224x224

A ViT-H/14 with a 78.0% top-1 ImageNet-1k zero-shot was trained on JUWELS Booster. See model details here: https://huggingface.co/laion/CLIP-ViT-H-14-laion2B-s32B-b79K

#### ViT-g/14 224x224

A ViT-g/14 with a 76.6% top-1 ImageNet-1k zero-shot was trained on JUWELS Booster. See model details here: https://huggingface.co/laion/CLIP-ViT-g-14-laion2B-s12B-b42K

This model was trained with a shorter schedule than the other LAION-2B models, with 12B samples seen instead of 32B+. It matches LAION-400M training in samples seen. Many zero-shot results are lower as a result, but despite this it performs very well on some OOD zero-shot and retrieval tasks.

#### ViT-B/32 roberta base

A ViT-B/32 with a RoBERTa-base text encoder, reaching a 61.7% top-1 ImageNet-1k zero-shot, was trained on the stability.ai cluster. See model details here: https://huggingface.co/laion/CLIP-ViT-B-32-roberta-base-laion2B-s12B-b32k

This is the first OpenCLIP model to use an HF text tower. It has better performance on a range of tasks compared to the standard text encoder, see [metrics](https://huggingface.co/laion/CLIP-ViT-B-32-roberta-base-laion2B-s12B-b32k/blob/main/unknown.png).

#### ViT-B/32 xlm roberta base

A ViT-B/32 with an XLM-RoBERTa-base text encoder, reaching a 62.33% top-1 ImageNet-1k zero-shot, was trained on the stability.ai cluster. See model details here: https://huggingface.co/laion/CLIP-ViT-B-32-xlm-roberta-base-laion5B-s13B-b90k

This is the first OpenCLIP model trained on the full LAION-5B dataset, and hence the first multilingual CLIP trained with OpenCLIP. It has better performance on a range of tasks compared to the standard text encoder, see [metrics](https://huggingface.co/laion/CLIP-ViT-B-32-xlm-roberta-base-laion5B-s13B-b90k/blob/main/metrics.png).

A preliminary multilingual evaluation was run: 43% on ImageNet-1k Italian (vs 21% for the English B/32) and 37% on ImageNet-1k Japanese (vs 1% for the English B/32 and 50% for the B/16 CLIP trained on Japanese data). It shows the multilingual property is indeed there as expected; larger models should get even better performance.
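
A minimal sketch of using the multilingual text tower (the prompts and variable names here are illustrative; the model/pretrained tags match the interface list further below):

```python
import torch
import open_clip

model, _, preprocess = open_clip.create_model_and_transforms(
    'xlm-roberta-base-ViT-B-32', pretrained='laion5b_s13b_b90k')
tokenizer = open_clip.get_tokenizer('xlm-roberta-base-ViT-B-32')

# The HF tokenizer handles any language covered by XLM-RoBERTa.
texts = tokenizer(['a photo of a cat',      # English
                   'una foto di un gatto',  # Italian
                   '猫の写真'])              # Japanese
with torch.no_grad():
    text_features = model.encode_text(texts)
    text_features /= text_features.norm(dim=-1, keepdim=True)
```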

#### ViT-H/14 xlm roberta large

A ViT-H/14 with an XLM-RoBERTa-large text encoder, reaching a 77.0% top-1 ImageNet-1k zero-shot (vs 78.0% for the English equivalent), was trained on the stability.ai cluster. See model details here: https://huggingface.co/laion/CLIP-ViT-H-14-frozen-xlm-roberta-large-laion5B-s13B-b90k

This model was trained following the [LiT](https://arxiv.org/abs/2111.07991) methodology: the image tower was frozen (initialized from the English OpenCLIP ViT-H/14), while the text tower, initialized from [xlm-roberta-large](https://huggingface.co/xlm-roberta-large), was left unfrozen. This reduced training cost by a factor of 3.

See the full English [metrics](https://huggingface.co/laion/CLIP-ViT-H-14-frozen-xlm-roberta-large-laion5B-s13B-b90k/resolve/main/results_xlm_roberta_large.png).

On zero-shot ImageNet classification with translated prompts this model reaches:

* 56% in Italian (vs 21% for https://github.com/clip-italian/clip-italian)
* 53% in Japanese (vs 54.6% for https://github.com/rinnakk/japanese-clip)
* 55.7% in Chinese (to be compared with https://github.com/OFA-Sys/Chinese-CLIP)

#### YFCC-15M

Below are checkpoints of models trained on YFCC-15M, along with their zero-shot top-1 accuracies on ImageNet and ImageNetV2. These models were trained using 8 GPUs and the same hyperparameters described in the "Sample running code" section, with the exception of `lr=5e-4` and `epochs=32`.

* [ResNet-50](https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/rn50-quickgelu-yfcc15m-455df137.pt) (32.7% / 27.9%)
* [ResNet-101](https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/rn101-quickgelu-yfcc15m-3e04b30e.pt) (34.8% / 30.0%)

#### CC12M - https://github.com/google-research-datasets/conceptual-12m

* [ResNet-50](https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/rn50-quickgelu-cc12m-f000538c.pt) (36.45%)

### Pretrained Model Interface

We offer a simple model interface to instantiate both pre-trained and untrained models.

NOTE: Many existing checkpoints use the QuickGELU activation from the original OpenAI models. This activation is actually less efficient than native torch.nn.GELU in recent versions of PyTorch. The model defaults are now nn.GELU, so one should use model definitions with the `-quickgelu` postfix for OpenCLIP pretrained weights that were trained with QuickGELU. All OpenAI pretrained weights will always default to QuickGELU. The non `-quickgelu` model definitions can also be used with weights that were trained with QuickGELU, but there will be an accuracy drop; when fine-tuning, that drop will likely vanish over longer runs.

Future trained models will use nn.GELU.
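
Concretely, a brief sketch (both tags appear in the `list_pretrained()` output below): the activation is selected by the model name, not a separate flag.

```python
import open_clip

# LAION-400M weights were trained with QuickGELU, so pair them with
# the -quickgelu model definition.
model_400m, _, preprocess_400m = open_clip.create_model_and_transforms(
    'ViT-B-32-quickgelu', pretrained='laion400m_e32')

# Newer LAION-2B weights use the default nn.GELU definition.
model_2b, _, preprocess_2b = open_clip.create_model_and_transforms(
    'ViT-B-32', pretrained='laion2b_s34b_b79k')
```

The full list of available `(model, pretrained)` pairs: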
```python
>>> import open_clip
>>> open_clip.list_pretrained()
[('RN50', 'openai'),
 ('RN50', 'yfcc15m'),
 ('RN50', 'cc12m'),
 ('RN50-quickgelu', 'openai'),
 ('RN50-quickgelu', 'yfcc15m'),
 ('RN50-quickgelu', 'cc12m'),
 ('RN101', 'openai'),
 ('RN101', 'yfcc15m'),
 ('RN101-quickgelu', 'openai'),
 ('RN101-quickgelu', 'yfcc15m'),
 ('RN50x4', 'openai'),
 ('RN50x16', 'openai'),
 ('RN50x64', 'openai'),
 ('ViT-B-32', 'openai'),
 ('ViT-B-32', 'laion400m_e31'),
 ('ViT-B-32', 'laion400m_e32'),
 ('ViT-B-32', 'laion2b_e16'),
 ('ViT-B-32', 'laion2b_s34b_b79k'),
 ('ViT-B-32-quickgelu', 'openai'),
 ('ViT-B-32-quickgelu', 'laion400m_e31'),
 ('ViT-B-32-quickgelu', 'laion400m_e32'),
 ('ViT-B-16', 'openai'),
 ('ViT-B-16', 'laion400m_e31'),
 ('ViT-B-16', 'laion400m_e32'),
 ('ViT-B-16', 'laion2b_s34b_b88k'),
 ('ViT-B-16-plus-240', 'laion400m_e31'),
 ('ViT-B-16-plus-240', 'laion400m_e32'),
 ('ViT-L-14', 'openai'),
 ('ViT-L-14', 'laion400m_e31'),
 ('ViT-L-14', 'laion400m_e32'),
 ('ViT-L-14', 'laion2b_s32b_b82k'),
 ('ViT-L-14-336', 'openai'),
 ('ViT-H-14', 'laion2b_s32b_b79k'),
 ('ViT-g-14', 'laion2b_s12b_b42k'),
 ('ViT-g-14', 'laion2b_s34b_b88k'),
 ('ViT-bigG-14', 'laion2b_s39b_b160k'),
 ('roberta-ViT-B-32', 'laion2b_s12b_b32k'),
 ('xlm-roberta-base-ViT-B-32', 'laion5b_s13b_b90k'),
 ('xlm-roberta-large-ViT-H-14', 'frozen_laion5b_s13b_b90k'),
 ('convnext_base', 'laion400m_s13b_b51k'),
 ('convnext_base_w', 'laion2b_s13b_b82k'),
 ('convnext_base_w', 'laion2b_s13b_b82k_augreg'),
 ('convnext_base_w', 'laion_aesthetic_s13b_b82k'),
 ('convnext_base_w_320', 'laion_aesthetic_s13b_b82k'),
 ('convnext_base_w_320', 'laion_aesthetic_s13b_b82k_augreg'),
 ('convnext_large_d', 'laion2b_s26b_b102k_augreg'),
 ('convnext_large_d_320', 'laion2b_s29b_b131k_ft'),
 ('convnext_large_d_320', 'laion2b_s29b_b131k_ft_soup'),
 ('convnext_xxlarge', 'laion2b_s34b_b82k_augreg'),
 ('convnext_xxlarge', 'laion2b_s34b_b82k_augreg_rewind'),
 ('convnext_xxlarge', 'laion2b_s34b_b82k_augreg_soup'),
 ('coca_ViT-B-32', 'laion2b_s13b_b90k'),
 ('coca_ViT-B-32', 'mscoco_finetuned_laion2b_s13b_b90k'),
 ('coca_ViT-L-14', 'laion2b_s13b_b90k'),
 ('coca_ViT-L-14', 'mscoco_finetuned_laion2b_s13b_b90k')]

>>> model, train_transform, eval_transform = open_clip.create_model_and_transforms('ViT-B-32', pretrained='laion2b_s34b_b79k')
```
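
Putting the interface to use, a minimal zero-shot classification sketch (the image path and prompt set are placeholders; the flow follows the interactive notebooks in `docs/`):

```python
import torch
from PIL import Image
import open_clip

model, _, preprocess = open_clip.create_model_and_transforms(
    'ViT-B-32', pretrained='laion2b_s34b_b79k')
tokenizer = open_clip.get_tokenizer('ViT-B-32')

image = preprocess(Image.open('cat.jpg')).unsqueeze(0)  # placeholder image path
text = tokenizer(['a photo of a cat', 'a photo of a dog'])

with torch.no_grad():
    image_features = model.encode_image(image)
    text_features = model.encode_text(text)
    image_features /= image_features.norm(dim=-1, keepdim=True)
    text_features /= text_features.norm(dim=-1, keepdim=True)
    text_probs = (100.0 * image_features @ text_features.T).softmax(dim=-1)

print(text_probs)  # e.g. tensor([[0.98, 0.02]]) for a cat image
```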

### Model distillation

You can distill from a pre-trained model by using `--distill-model` and `--distill-pretrained` to specify the model you'd like to distill from.
For instance, to distill from OpenAI ViT-L/14 use `--distill-model ViT-L-14 --distill-pretrained openai`. A conceptual sketch of the distillation objective follows.
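
Conceptually, distillation adds a soft-target term in which the frozen teacher's image-text similarity distribution supervises the student's. A minimal sketch of such a term (illustrative only, not necessarily the exact loss implemented in this repository):

```python
import torch.nn.functional as F

def distill_term(student_logits, teacher_logits):
    # Both inputs are [batch, batch] image-text similarity matrices.
    # Soft cross-entropy: the teacher's softmax over similarities acts
    # as the target distribution for the student.
    return -(teacher_logits.softmax(dim=1) *
             F.log_softmax(student_logits, dim=1)).sum(dim=1).mean()
```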
### Gradient accumulation

To simulate larger batches use `--accum-freq k`. If the per-GPU batch size, `--batch-size`, is `m`, then the effective batch size will be `k * m * num_gpus`.

When increasing `--accum-freq` from its default of 1, samples/s will remain approximately constant (e.g., doubling `--accum-freq` doubles both the effective batch size and the time per optimizer step). It is recommended to first use other features that reduce memory pressure, such as `--grad-checkpointing --local-loss --gather-with-grad`, before increasing `--accum-freq`; `--accum-freq` can be used in addition to these features.

Instead of 1 forward pass per example, there are now 2 forward passes per example. However, the first is done with `torch.no_grad`.

There is some additional GPU memory required --- the features and data from all `k` micro-batches are stored in memory.

There are also `k` loss computations instead of the usual 1, as sketched below.
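
A minimal sketch of this two-pass pattern (illustrative only, not the repository's exact training loop; `loss_fn` is assumed to be a CLIP-style contrastive loss taking image features, text features, and the logit scale):

```python
import torch

def accum_step(model, loss_fn, optimizer, micro_batches):
    # Pass 1: cache features for all k micro-batches without gradients.
    # model(images, texts) is assumed to return (img_feats, txt_feats, scale).
    with torch.no_grad():
        img_feats, txt_feats = [], []
        for images, texts in micro_batches:
            f_img, f_txt, _ = model(images, texts)
            img_feats.append(f_img)
            txt_feats.append(f_txt)

    optimizer.zero_grad()
    # Pass 2: recompute each micro-batch with gradients, splicing its fresh
    # features into the cached effective batch so the contrastive loss sees
    # all k * m samples. This yields k loss computations per optimizer step.
    for j, (images, texts) in enumerate(micro_batches):
        f_img, f_txt, logit_scale = model(images, texts)
        all_img = torch.cat(img_feats[:j] + [f_img] + img_feats[j + 1:])
        all_txt = torch.cat(txt_feats[:j] + [f_txt] + txt_feats[j + 1:])
        loss_fn(all_img, all_txt, logit_scale).backward()
    optimizer.step()
```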

For more information see Cui et al. (https://arxiv.org/abs/2112.09331) or Pham et al. (https://arxiv.org/abs/2111.10050).

### Int8 Support

We have beta support for int8 training and inference.
You can enable int8 training with `--use-bnb-linear SwitchBackLinearGlobal` or `--use-bnb-linear SwitchBackLinearGlobalMemEfficient`.
Please see the bitsandbytes library for the definitions of these layers.
For CLIP ViT-Huge this should currently correspond to a 10% training speedup with no accuracy loss.
More speedups are coming once the attention layer is refactored so that linear layers can be replaced there, too.

See the tutorial https://github.com/mlfoundations/open_clip/blob/main/tutorials/int8_tutorial.ipynb or the [paper](https://arxiv.org/abs/2304.13013).
### Support for remote loading/training

It is always possible to resume directly from a remote file, e.g., a file in an s3 bucket. Just set `--resume s3://<path-to-checkpoint>`.
This will work with any filesystem supported by `fsspec`.
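
As a rough illustration of why this works (not the trainer's actual resume code), a checkpoint behind any `fsspec` protocol can be read like a local file:

```python
import fsspec
import torch

# '<path-to-checkpoint>' is a placeholder, as above.
with fsspec.open('s3://<path-to-checkpoint>', 'rb') as f:
    checkpoint = torch.load(f, map_location='cpu')
```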

It is also possible to train `open_clip` models while continuously backing up to s3. This can help to avoid slow local file systems.

Say that your node has a local ssd `/scratch` and an s3 bucket `s3://<path-to-bucket>`.

In that case, set `--logs /scratch` and `--remote-sync s3://<path-to-bucket>`. Then, a background process will sync `/scratch/<run-name>` to `s3://<path-to-bucket>/<run-name>`. After syncing, the background process will sleep for `--remote-sync-frequency` seconds, which defaults to 5 minutes.

There is also experimental support for syncing to other remote file systems, not just s3. To do so, specify `--remote-sync-protocol fsspec`. However, this is currently very slow and not recommended.

Also, to optionally avoid saving too many checkpoints locally when using these features, you can use `--delete-previous-checkpoint`, which deletes the previous checkpoint after saving a new one.

Note: if you are using this feature with `--resume latest`, there are a few caveats. First, use with `--save-most-recent` is not supported. Second, only `s3` is supported. Finally, since the sync happens in the background, it is possible that the most recent checkpoint may not have finished syncing to the remote.

### Pushing Models to Hugging Face Hub

The module `open_clip.push_to_hf_hub` includes helpers for pushing models with weights and config to the HF Hub.

The tool can be run from the command line, e.g.:
`python -m open_clip.push_to_hf_hub --model convnext_large_d_320 --pretrained /train/checkpoints/epoch_12.pt --repo-id laion/CLIP-convnext_large_d_320.laion2B-s29B-b131K-ft`

## Scaling trends

The plot below shows how zero-shot performance of CLIP models varies as we scale the number of samples used for training. Zero-shot performance increases steadily for both ImageNet and [ImageNetV2](https://arxiv.org/abs/1902.10811), and is far from saturated at ~15M samples.

<img src="https://raw.githubusercontent.com/mlfoundations/open_clip/main/docs/scaling.png" width="700">

## Why are low-accuracy CLIP models interesting?

**TL;DR:** CLIP models have high effective robustness, even at small scales.

CLIP models are particularly intriguing because they are more robust to natural distribution shifts (see Section 3.3 in the [CLIP paper](https://arxiv.org/abs/2103.00020)).
This phenomenon is illustrated by the figure below, with ImageNet accuracy on the x-axis
and [ImageNetV2](https://arxiv.org/abs/1902.10811) (a reproduction of the ImageNet validation set with distribution shift) accuracy on the y-axis.
Standard training denotes training on the ImageNet train set and the CLIP zero-shot models
are shown as stars.

![CLIP scatter plot](https://raw.githubusercontent.com/mlfoundations/open_clip/main/docs/effective_robustness.png)

As observed by [Taori et al., 2020](https://arxiv.org/abs/2007.00644) and [Miller et al., 2021](https://arxiv.org/abs/2107.04649), the in-distribution
and out-of-distribution accuracies of models trained on ImageNet follow a predictable linear trend (the red line in the above plot). *Effective robustness*
quantifies robustness as accuracy beyond this baseline, i.e., how far a model lies above the red line. Ideally a model would not suffer from distribution shift and fall on the y = x line ([trained human labelers are within a percentage point of the y = x line](http://proceedings.mlr.press/v119/shankar20c.html)).
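
In symbols (following the definition of Taori et al.; the notation here is ours): if $\beta$ is the baseline fit mapping in-distribution to out-of-distribution accuracy for standard models, a model $f$'s effective robustness is $\rho(f) = \mathrm{acc}_{\mathrm{OOD}}(f) - \beta(\mathrm{acc}_{\mathrm{ID}}(f))$, i.e., its vertical distance above the red line.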

Even though the CLIP models trained with this codebase achieve much lower accuracy than those trained by OpenAI, our models still lie on the same trend of improved effective robustness (the purple line). Therefore, we can study what makes CLIP robust without requiring industrial-scale compute.

For more information on effective robustness, please see:

- [Recht et al., 2019](https://arxiv.org/abs/1902.10811)
- [Taori et al., 2020](https://arxiv.org/abs/2007.00644)
- [Miller et al., 2021](https://arxiv.org/abs/2107.04649)

To learn more about the factors that contribute to CLIP's robustness, refer to [Fang et al., 2022](https://arxiv.org/abs/2205.01397).

## Acknowledgments

We gratefully acknowledge the Gauss Centre for Supercomputing e.V. (www.gauss-centre.eu) for funding this part of the work by providing computing time through the John von Neumann Institute for Computing (NIC) on the GCS Supercomputer JUWELS Booster at Jülich Supercomputing Centre (JSC).

## The Team

Current development of this repository is led by [Ross Wightman](https://rwightman.com/), [Cade Gordon](http://cadegordon.io/), and [Vaishaal Shankar](http://vaishaal.com/).

The original version of this repository is from a group of researchers at UW, Google, Stanford, Amazon, Columbia, and Berkeley.

[Gabriel Ilharco*](http://gabrielilharco.com/), [Mitchell Wortsman*](https://mitchellnw.github.io/), [Nicholas Carlini](https://nicholas.carlini.com/), [Rohan Taori](https://www.rohantaori.com/), [Achal Dave](http://www.achaldave.com/), [Vaishaal Shankar](http://vaishaal.com/), [John Miller](https://people.eecs.berkeley.edu/~miller_john/), [Hongseok Namkoong](https://hsnamkoong.github.io/), [Hannaneh Hajishirzi](https://homes.cs.washington.edu/~hannaneh/), [Ali Farhadi](https://homes.cs.washington.edu/~ali/), [Ludwig Schmidt](https://people.csail.mit.edu/ludwigs/)

Special thanks to [Jong Wook Kim](https://jongwook.kim/) and [Alec Radford](https://github.com/Newmu) for help with reproducing CLIP!

## Citing

If you found this repository useful, please consider citing:
```bibtex
@software{ilharco_gabriel_2021_5143773,
  author       = {Ilharco, Gabriel and
                  Wortsman, Mitchell and
                  Wightman, Ross and
                  Gordon, Cade and
                  Carlini, Nicholas and
                  Taori, Rohan and
                  Dave, Achal and
                  Shankar, Vaishaal and
                  Namkoong, Hongseok and
                  Miller, John and
                  Hajishirzi, Hannaneh and
                  Farhadi, Ali and
                  Schmidt, Ludwig},
  title        = {OpenCLIP},
  month        = jul,
  year         = 2021,
  note         = {If you use this software, please cite it as below.},
  publisher    = {Zenodo},
  version      = {0.1},
  doi          = {10.5281/zenodo.5143773},
  url          = {https://doi.org/10.5281/zenodo.5143773}
}
```

```bibtex
@inproceedings{Radford2021LearningTV,
  title={Learning Transferable Visual Models From Natural Language Supervision},
  author={Alec Radford and Jong Wook Kim and Chris Hallacy and A. Ramesh and Gabriel Goh and Sandhini Agarwal and Girish Sastry and Amanda Askell and Pamela Mishkin and Jack Clark and Gretchen Krueger and Ilya Sutskever},
  booktitle={ICML},
  year={2021}
}
```

```bibtex
@inproceedings{schuhmann2022laionb,
  title={{LAION}-5B: An open large-scale dataset for training next generation image-text models},
  author={Christoph Schuhmann and
          Romain Beaumont and
          Richard Vencu and
          Cade W Gordon and
          Ross Wightman and
          Mehdi Cherti and
          Theo Coombes and
          Aarush Katta and
          Clayton Mullis and
          Mitchell Wortsman and
          Patrick Schramowski and
          Srivatsa R Kundurthy and
          Katherine Crowson and
          Ludwig Schmidt and
          Robert Kaczmarczyk and
          Jenia Jitsev},
  booktitle={Thirty-sixth Conference on Neural Information Processing Systems Datasets and Benchmarks Track},
  year={2022},
  url={https://openreview.net/forum?id=M3Y74vmsMcY}
}
```

[](https://zenodo.org/badge/latestdoi/390536799)
|
open_clip/docs/CLIP.png DELETED
Binary file (252 kB)

open_clip/docs/Interacting_with_open_clip.ipynb DELETED
The diff for this file is too large to render; see the raw diff.

open_clip/docs/Interacting_with_open_coca.ipynb DELETED
@@ -1,118 +0,0 @@
{
  "nbformat": 4,
  "nbformat_minor": 0,
  "metadata": {
    "colab": {
      "provenance": []
    },
    "kernelspec": {
      "name": "python3",
      "display_name": "Python 3"
    },
    "language_info": {
      "name": "python"
    },
    "accelerator": "GPU",
    "gpuClass": "standard"
  },
  "cells": [
    {
      "cell_type": "code",
      "source": [
        "!pip install open_clip_torch transformers"
      ],
      "metadata": {
        "id": "JvaEkx8Cyvhg"
      },
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "execution_count": 18,
      "metadata": {
        "id": "vE4lFFkKyotX"
      },
      "outputs": [],
      "source": [
        "import open_clip\n",
        "import torch\n",
        "\n",
        "model, _, transform = open_clip.create_model_and_transforms(\n",
        "  model_name=\"coca_ViT-L-14\",\n",
        "  pretrained=\"mscoco_finetuned_laion2B-s13B-b90k\"\n",
        ")"
      ]
    },
    {
      "cell_type": "code",
      "source": [
        "!wget https://i.imgur.com/8H7XCH0.jpg -O cat.jpg"
      ],
      "metadata": {
        "id": "oOaE1AmDyth_"
      },
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "source": [
        "from IPython.display import Image\n",
        "Image('cat.jpg')"
      ],
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 407
        },
        "id": "Y9Q6bhVA2L01",
        "outputId": "1b920080-e8cd-4d2f-fb23-30e6f1b612f9"
      },
      "execution_count": 19,
      "outputs": [
        {
          "output_type": "execute_result",
          "data": {
            "image/jpeg": "<base64-encoded JPEG output of the displayed cat.jpg omitted>"
aW6f0wfXsqiW6bR/T1nu8lXGKXQdmcS2v5I1ZI+z1ZBdyAPgaMi4ZxRCxllDoV7xzkAY50bJC0uzwtjn3iOfrrXZI5ShREdM9e00/KsmGrEU1hxEzdkxC6TnXq239c6YLGllAiNcKURWAQd7UTVz2Mksh72APB87ezNQ+6QRHU8sZOOTNz+FI3YyQCrt2eh9S5zv0rqTQREDtBgtnLE86LnhtmhBWAsM7sSQxNDtZgvnsJI/cG9eytVhOSzxSxljNoOTy3J+VetOJR2NxHIXkuIxnUh7o+X5VXBbIiiSWbGDuNGAT9KsltLObvJMipjOZBg59oBoLRmHXM9s8zy2mtcYcKyqcgjPjvQF3xeKcK0dpJHMow5DHTJ54/tPs2qUV0YYJonniZZVCBw0jFcHYry3oSTsJJcmTPTZfxefP5U0hUv0EmluLl9blNhpCKKDjuHguCpCHfdWGfhTcvFAMRqzn/SQAPlzpZI0zuks8BjhY6Q2nA+VIOhxwfimqVYmCqpOnblWoKlxlc1homhS57roBj+3OMj21ruF3na2S69iFx3t8+Hurh8nFX2Rz54f/AESd9EuMYrqSZHSgLmRxdZPL20Rbt2qrp2OfnXLXs5AwkEZO58DUSmpqnIUCgjf2UHPN3dIPSlQU7ZcE3x08q5cy6Y9B5+BriXBUgb5+efrQ8/8AWLHfY9OlMlsNBMEgjbLAnHP140NeX43Gdh0qMJznO/j7KVX0bLKcg8+VWjG3RTifSbSYLH3vxZyc+vnUJdDzll2znl4+ulCiRBGjqd9O/r9Klbyhw2QPj8K803ytqi5hhT4fEY+lQBwpDfM+vjVrAiPYdff/ADXBF/T9u3r6UUrJ+ypAe0AHL2et/Kozns325Y6VajHXpOPHPlVd4urB2OKahXpFKysQasWRsnV41UikY3wf1+tRJIbS+w/T6UtCpl3dlVs4wB6/ilFhxVIJIuwkQLG3dDjUCg6Hyo69m0W7pFhVAGST+Z/WsGP6KHs5DENerffPiK9XwI0m2dvjrVm44j9pre5nEV4rSGMnSsRjRWB8SFG4xSG24xNNp7NJGPeYhensHSsvI00zSSBVQL4Hb50fZ3d0bgxEx24eJhnG0nVd/dXclR0jbiHFBFw8feX3J1acd8ny8qRcJ4tJdcSVDHlXPcB5E+BoC84fxJple5QszDI/qAYFHcMiNjcDDo0zLpjCZITbnnx91M6rYqWxrNAHeWXSXR3z2RfG/MnHSq7WZIuLwaSwUqUXbOk486ulmt0soxO6nAJJ1DvY6E+GPfWW4pfSm6M0PdjYjspP7u7yNCD2GdUfQpdaR7JrQDvADOOXX9aJtLmQvqtUjyjYfUO8fPflj21z7OXfDftHwJ7uTiX3fiNqn9azEGsuRydTnkeWMGibeXB7kXZhtmLfSrTdonB2y5i/d1O2dR2J288D9KBvY2LYjww0nrTAW8hY6HyG5ltgB661c9nohLGTILfgT/V9ahpdlrszl5JNE1vMw1vgZz8Nz+RqkcTzIwuo8/27cwfGmssSTNy3XdT19eVASWiuwV0AUYAfcj4/rW0wbK44o4pGuLW4SLWO8h3V/d0ohJpn7nZKq89jqQ/SgpuHo87rr0gDaq7O1uo5CySHsxy6UaYtoZTXaQRFECM55AsVOazyXV2Jz2ojHlWg7LtYxriDbfiIFQ7JVypQqD11frTKH6Dl+Ab3V3ewrbQwyFcZJB0fCiV4Be3YQJDbQjq8hLn50bbxsrnsou94t3jRiWUs0ZaRy3lv+VNGKiB3IEf7P2awql9xVpiD3UjUBfgKHHC0DsLSEleuNqbfdVgTOkAHx51cOzt0aUy9wb4O2/1puQFH9FcHC7nOXcBOqgfKq5OCuJNIlwM5VQ2MH2HaiZuIG4JA7TSrbae769lWQh5gJpICdtjnO1amtswF9wu0ZFMzgA4AKd0/CmH+XkOXRYstz7NCB8zV0U7SQ3EmAIUAXGOtVXnEZbbs0hkSIacs5GWPj/FMlboRvVlc/CNb63kfK7kHG9UHhlmMkPk/7Rg+7lUUe6vsYa40/wCpzpBFcWw7RwOzB3HekY0eC/QqTJRNw7Vjsu6o7ylQQfzq2Oez1YSbQGPMDGPKj04W0bZ1RBRzxHv8aEulxIMyzaNWciIbH10qdJuh91Z4RIpWaOVpFbu51daoju4V1pFC8rjIJAwPiedXX+mOGziTCqFLsSOefGhgO0VWzI4Yj+mpxk+f1rR/0EiTrIluTJL2XLbOo+vPFU2oeFZLm1jumKtl3Z8avdTYQOluAIooh10jU3ryoVL60tneH7sWA5u3M+6s3eomquyT37PGpQSqG3/6eceP8VVHerPMuREVHM6WUiipJ4GgTsXmjBGcFdSioQSnHelVz1LLpP8ANBN0akyFzJZs+6prbbIkHr30FKmm/eLA7mAccw31oqUQyXUTv93eNO8ehPjt+lLY3Z3YnnIxZse2hfoPsZxG3hGMxhsY55x+1XiRA5SfGAue6c58Pd+VKo0KOSsQB5nJzRLjTC2EGo8sbfwa2g0wfinFeHq4tlV9RGnLDYe0igwsUQ1aBhur9KXx4ub1Yuy0NqzqPSj5pJJC2hQ7HG+fnS6Dui0cQtkkaPBd16Iny9lWpfI8ZElrIWG55n3fvQoVYZFWNkt8HMZ063Yjmfb4UTalWl3a4LEnvMQvX/Tj40eNAUmyDy2ExaJoZkZhkd0jzoRY4i2l8nz57eulOhIh2WQKfw6CQwYUvubXRLr3AbmTyz4VN/4N/wBBmtoVD97IB5auVVm0tpo2jR8Odlyc7+utNY7YOo14CncHlmrDZQIF0FdXUKo2+NKEyyQwa17QOzasMpp3xAQX3C3uLF/6NvgOsqgFcA/9v1qV7ZRt/VhQq394G+R+tCJZ/ewyRntAD+DGPYaZS0ZoRyxoYyrIveG++2fGiOF3pgZFdtSgaRvRkllCgeD/AOrjbpShomUPnY9M+NK0pKmaStUzRtpuP+n06gVyJ+xfRgfpVXD50EfZnGoY1E9fCrLnMJRxvvzrz5Q4viebOFOg5XYxsTueefX50vkPeOo435Yq+K7VFAyN+XQ1RKQ86JnAPWppbBBUEW7BpGd+XTHKrWl/r6tKFQckdPKqkgRxoMxi0oW2XJZvD96oFwYEVkYhwcg+dFDF4dWkOPGqbyJ3jLIB5H1+VWXEtvHEjR/9Rhv+vurzSv2GX225H5fzTJu7C3uzSXFpIVRUw7YGCDz6+jXuHxmRmJU5FFWkzSd4pqbvN3fngeHlVJuEht5VjbSXwefhXGxNBcjquMDu4x4jHr4VTNJlMjY+fr51TbGSSRde4bfzyf1qy4iQ63VgNAJIPh66UEElbzxs3fXujqNjn61abeJbXtFly2dlP+n10pXZYDOHxy938VN7kXMnYM+zHJPI5+vnTL8F7PGVu22GnbNEGfXbsJB/VB7pxv8AHxqq2gcDv76DzqyKJJCchu6GxpGdgMk+ylfYBbcQyXlnPGP6ZZCM451hpBiHfGc6cE7ivodzN2PD7q5eMns4yRvj17a+YPPbyyPJKm+Sw3
PeB5V6Xh3TO7xemda6EDL2ve1Nlmxmr1u7W4UHS0DJ3lP+oeXgaCmhgli7XtWQsdWDy09cZqqO2dssk6pGBzbmfhXejoZqrTjl2Q0V1etLEqFUDjUP4qp5Y7cdonZ4VSuAN5PDl59az0d9ELfRPE+oNvIH7p+HWom+UFimCB3lRiWGrxH0NCtmv8JXtwtzK6SSFXyWYL+H2Y8aVNK0kq6jnBotLO8unLwW8r6tydPxopvszxQRGRbbWVGp1RgzKPHFUUWJIr+zl6vD+P2tzJGsiqSNLtgElSBv7629x9pOIbJDax25/udm1ezHn5181kB7ZlxuNq1nBb8XdgYr4HMAxFOfD/S3srNWLF0ba3d7uNWcdoeZJOnPuppHPC0aoo3XPeByGPL4VjY+IMsZhhwFb/qa+ZpinEYtMcURe3K4zq5Mf0pHBjqQ9ltx2uEyHyN+vv8ArVM0Lr3T3jnptv7Ksimdo11OASue5hh8KHknknKxqADjn1AqWyhWYSszKuh2xk+AFERqezGlYyfMbVSkZTK52H438TRWgsAypINsDAx8KtHRN7KpI8sMogXwFea3iUf/AE1IO3t9lSMDPpBD4x41VdjDLGNXjhzmi5MPEIheHsu3de4B3gDjlU04g8uTGhVOgbbNDssYsYU5n8RBHOh5RMi6lUO/9ur6Uy6F7DGmSTaSXfOVES5NA3Fw1vcMIkVcDPfOon3VYnbSRq0mNSjl5UVcQCW1UhooXJGS/P4VrRuiiETS2+p441bOSzjn7qvK/wDlzgqQP9Bxg1TP2VrFpGqVuhYYz7BVEU7Mv/TKhzttWezdBUKdjapGoY9q2s53qIkSaZ1VgHbYORyxRMmuFYCXwdOTilsdxIsmpFV85IwKrBctkpOtFYfsrpT2Us7eMjcq1VqrPEskxjjB/CqjcfvSiIhSjy7x4wAu1MoriF5F7FVwB3iwz69tGSBZC9dNLaJSSR7hSdl/qkfeW38ScevOmV9ApkLdoTnp4UNbWrmcBQG3zypIyiO06B+Igvca2IzGgWu8PKaTGPwt18qnxLa8kdoxjPIeFdsVMuhiuFXn8a2uIPYWHFuNKJkHmelLr6BHuE7ZWbVv3TpGfrWigtu3kGrI222x6PlVb2sTHIUMV2yevu8KklY8pJCbQkUUaLIe6PX8VZHhl1ZRsePhRUlqvbZ0rhQds8/XjVQTsWwVwPbzogKJ44hb3DsneCYUjpnb+KVRRTC7EaDWeerz8KeXaaeHOCrDVIuPZQtlIq3Urbee+wrN6CuwQWkobvf01GxOnNEqpaTecgDbJGoYplGyyTFHKjbIz6+dRmjJU9mofvdNt6VSG4gEsEbzqEbtBkB3Mek4pSbYpI4XcEEDHl1pzbt20miQYXOwG3trlugkulSRtOQ2Pd+lb3s3oUyQJcQ5XKyqmVI+ZqFtDDDG0l1chtuRJx8K0C8NchsYJPXPKlFxwG2hneYzFTzIzTpWI2WKpYJJFChRt1ceudXNiSJkfCjIbB6ezy8qutmgtrZA1yrPp2wuot7vCg7viNmWyrK+TpPdwB68aSSp6GjK+yuJ0UPEw2Db55/xRCYVhq5ePWlUvHOFibE15FrGQRkLyohftFwOFdL3ynH4QOlBq9hUktDBrR5F1xvqTPTn+1L7nh0gPaxJy545GhD9tOHxvoSaJc7FsE7e6hLv7f28CqlmrT4PNl0+z30KByDQiuNBXDk7nO/rzqmTgk98x7CJRMDsrOEJPv2zWXuvtrxC4lDrDbId/wCzNUf+L+L69RmT2aNqagfIh+yvbSskyvFKh0yI2xA9ntox5P6DCTDKqDH/AMhWaj+013f3CDily8gUaUZgDp8ieeKcx3tulvLHOpkBibTvjvf2n3Zrmz472SyJSVkLm5DSAxjlR8EimATNsMkDzpLbmLT/AFfwn50awWT7uA+nUGKjocHauaUfRBpDTh9w331WYgE5ULjI3GPhS+5dBMyA8m60XArLeQAqF3xucZNKQGZxI+devkK0YmSDmzJKMf2kZz40u4vxScoWAx3/AI0Q081veHVA4PVdPIDmfZQ3ExNPeaHQLDpBTHKqQitWPBb2fT4MwPFJH/auo49nWhZ7XtbtIon7rfhOMZyPD9KkLlI5iUHcK6duWeuPpUmUy6GjU93G5P615pC0W2adgqvJp1ju4O4x4+yhyvaJeidCY8rEJQfwb6viQvOroWUQmJiA4bKnfVk9Krv5HMzCN9UaxjOnlnJ3x408GltoKkl2LomIxHz1HA23/mprZiO8Z8AoATsedA3ErRQq0g/qlwEUdBuSfyoiC7aRmDHB86V2tg6GtgheLTJJo1ZxnfYeNXaz93YJIIm7JmJwMtuBj50Ddy9n93SMd7Tgkdarlupre57PYmSJkwV8Rj6UsFbATWTtERT3k0nUvj5ftWP41wEtIbjhsUbQ570GcYP+3yrV3JSAxw68XBG8a749p8eVC3Ek8EYdSGUupYKdy1dOKUsbtD45OLPnt1DxG3gMslpPFCp0sxibA9/LNQS5t2tdErtNpGEQ7ac/nX0i+4iP8kd4oRpL9+Nt1O/h4VgeKWnD5YLdoEMUrbMw/Cw8cfSvRw5fk7R1wzcuwOOKXilwIrK3dtC5fI2Xx91OeGcDtpkU3U6IVkI1JnHjvtUPs28FvJ2SEYmJ1uScgDl8aeQNci+WFtKwEnSDGAQMfh2q7f4VQ9splgjRERC67NlcaR9fzzV7NayWbp2SgN+F0bBG/P8AalsNm6q4lkRy/wDu0kr9KlLbPDAndaJBtr5n4j86ZMDMtx/g6QSZcwlmHdlUbSD9DS3hCrF21tImtSMgNkDmM4rUXq9rHJHnXGSNeQBk+I86y85aGfto0cuo3AGzehRsNDOQfdlFwkepQclGGR5Uy4NxbhszhJ3NuHbuq4JVh4ZqFhfxXlrrZQruPwOv9vtqm74dbSE6E0qrY7m2k1jNWbcdlKgjs11Ajd+lUJE5YpkquctkYJrCcP4xxXg11I9s/bQA4KldmrQxfa+C+YtJK1u2MMh3HxpHE3Q3+8RTv2EGAq7Y8fE+yjoziMbqANhqPP140utDahAyTB5GXO/j5mj44pAO3kCNjlnl68qSSaMmjusHvFowQcbUHdEG5U6s+VHSXCppULGA3+nBoKXIeRyO8RgE9KC7HOyf/tLcEc/3qyKETQsHJ0pjl18s1RHk2iZO4OathaTsnXYcsbV0LaI9AUstwSyaezijOBnb4Vdw9bZ5VkuZTqTbvbg/tVdzbySEHUNZ2wedQ+7FAdTAtnbzrohBNEpSaG81xDdXOjQHUciKjIoRxEVKb5OaAt4pYgr6dDA5q6WeWWWJiAw8xyqTx7pDqf1LL4jIWNtQSLGw5VXZwRzaF1d47YqV6c3EythcYAxRHCCmojTqI8OdZajoz7C5IYljCAbdSedV2VqMEudIz3QOvrwoibQsR0LnNLo5ZhKQFOPDntWim4sEnsvlKQvpbw5AdK5byGN3ZFKjnvUp76GS7RXjkY7b/hod5RqBXIUvtmoqLbHc1VMp4udbSFPx5FWcMfs4BnHPP
Kqb3Mkkwj0kDqK8qTJCikFR4mrNNxFVWOY+JRpIB/aOZPr51y64iJ/+muATz8/rWYu7p2JUNnSedVRTS6Q+TkDGPKisNqwOS5GhlkVXAcFfbVU7Q4XDEdMYzSmScyadRyeec12SRzpwc5I2/T20nxsfkNLpnkgiQbHXqGfDFL7CNC8rd7WXbugdc1fd3WmWOF8BljDZHMN66VPh5Tsy5Xs2Ud45zmlr6mUvsVvcpDC52DKNQoSDjq3LqgKIORU1y9jM9zJpI0BRz26VnBE8PE8xxl0Vt9PLND4qVglkNrbxSHVqxgsNJ8arGmK/GRqYOdudA2ImuuIo79wKpOj1+VR4jxKysT94eZEbGnSTRUd7Cpa0aJpkS3eaJVU+3as5eXJvAyOwC4xz/EM53P61nL/7awdiY4Q0jZzk8qzd59pL66wNQXAxkCjDjEWTbNXxS/tuGxdozguv/TQHfNY6Tjl65bRJoQ/2jwoCSWW5l1SMWY+Jr3ZYY55Kd6VuxURCvKzNnJ5kmuFGHMUxtrdkLSFVOldhnzxR6Whv7osQA57x22oXQ/AQ6OgOT4VM2sijJRgMZzpphcWKxSFXOXz+FaLsrMhmVU7q/jkkbuZ/0+dE1L2LZ7AQwrISQSmobUvrc8Ys4F+ziS2oLyu+g7cx/t/WsebG4CBjC+DyOmhF8toD30DZpraXTi3CSZcttHnoBVUcbJGYpIS4xqyF5VcTFFJCoXaNcsR1NLJ3oXYQrOY3Dv8AhPTqaeWsZY2U2RojY689OXrFZzUyzxMBq1LqwK1dlaveRxoZDCpjZnPgvU1y5VVE5jBuzWaa8IDrkrCg5NnmfMDNDm1hWEO1zEsysNUZJywqjid6jW8U1rlIhEYVTqMcj7fOl5LzFXlfCRKD3djmpJUiauhldt2k80kUbsI7dw+OSilsdybhULDTDqwrMOu2f4r0jzRuLh4miTtNMiHO4GNWfiKsay0cIuY1B1Ws6ugB/FqH7CmSrTCnRvrNkELGTGliAcn18amxdYXAyFXvAjpQUjxLOLWKQs7rqxjb15UUs7NblRjRKMac88HHwzyrzkmTSIorMV73eYA4/OqHDSZkWQB86VOfzqxGzdxaCcnPl09e2hzpiV2znUhbOOW/QePlWQK2cnjLWrPIoZ2fSpA/SqIDrkQgAE+HWi5Gf7iYwxB06lpNDMYeILqBZe0GoDwp+NjUM2kkbsSHGkHmT0oiW4ktrZ5P6RnD5iLc0LfxQ4lhjkRVGpS2IwDguSenuqPEFNvfzduNOyqQ/wD6QRisotdAS9oos5WFxNFKmmRMuxPj+vjVfFLoHsrS2zhY1PiSxNU3UDniA0TaSQrajy/06T470PkLemNnVZZO5IjH8PvqyjseKCbqTtfs1MscqmZWOoj21i4L10ncOQI4jgBl/DmtdZ9gn2auY45BKdZ/qLy3xWOvMhGd/wCl2j9/AzkDrXXgqLKY3VjjhMSSSy/1BiKUOuB3cEdfhTe74hJJavKjIexdUIC7HzzWf4TfyT3DWtvCxhaMs6puxVf7jTp7o2PEJZhAtxalAWRuQb/UfOuxK1ZdSCZrsT21v2PeukGXKrjf3VUOJth445ZNMvdKEagKUTOZ+JLdxkRK3ewOQo+3e2lheSMzB/7gcYPs8KC12UYOt4YWkVmRA22GYFW9lRZop1Z2Kqy457Zzy5be+qBw43cmIe1VFyDtnFEGR+GpCsVsX0A/1mP4s9PZ7adIWweC4+5yaNeIi/dPh5e+n8U6I7iRFKMvPOTn9RSy7t47+1a5gQ5yAU1br7vChre8W00bKzL3WV1I736UW6COJuG9nrEbMRpVtB8fbSm+sI9COqkZbSTitB20dzCZrVymV1OhOrA8jXfuXb2bqsgbJDA+O35UqlyDdmMN1d8JucQzNobofwtWg4b9tpEZY5kJTqvMUNc2SSpFr2I5+/nSe64bHbWTyu3e1/0x5VpSSVMFLs+l2nHuH3h1ZZW6g7Uwmms5E7RZEPd2xXy7h0yQQhrzWQfwaT+LFaPhHFoYJRElkAD3l7R871O4vpAuSdGhtowttl5MKx5mrknj7R4zuByaujjbz2p7OOEMF8OVIeIcTlkK6CFlI5jlVFKPsH3foZ3VzJHOGKEgn+0VyO7EtwFEUmo7asVn4uPTJOsc7MDyLeFN4OMLc2MsfaxiZDqTB7zCulNJWRk2NOIm6V00qSv4jQaXKx3ALvuNylehu04zbGIOwlj/AN/4vZSaWRIbx4Zsq5OnHWk+RJP9NDemOTcPdXDzMNSt3gPKmXDImNy4B0n8ODS2+Cr2C2ksYMKKsuf7mO5oCTiHErU9pbo+puRxSRzQUabHlFt2jTXME9qzSFiynrQ8NwYJGdydXIUBa3HG7m1P3iZYUb/UKAveIvbBoJJ9b+QqyywcaRJwkNJpWcE76l/DUJWBshMWOqM9/ek9peXKlpJHLLjlmkfEuLyT6lTWiO/ex4VGc4xdIMU2zUwcXhSQ9rq0sefnTm84laparGSV1r8Kwl/wya2s4LxLhnhkGQCeVFY7exXtnJbbBJ2FD5Yqiyjy2grt7ZptTXGFJ/1c6IF3AxZFbVjr661kLmJ7W4ABDedetrxjPjk75wuKu/IVaJvG12au5vIFi1xtqK9MgGhbPjlvJcPG4K4BCljjBpLHYdoXkuZCBH15E+FCRIkCsNn1HZn5iuaWe9IrGNuzZWE0M1q8UrZmDanYncn20wTilrErxgb6BsfhWDgunM3bAEFRp7vWo397JHKhTdZB3Rmo/JJNBlFctmpk4xbJHMXXBHhS171Z9cdsMMNwTyNDLfW0KJavZr2khGuVzqPjgeHtqp3nd5Etz/Xjbu93GR0NU/kt9kXjV6PXHGeJWUYtkfDONOMb1mbsSPCZpyxlMpXJPlR3E3kXjizOdJbS/s8aq4tP22kKumMysVXHIaVH1oKblTGvSSFKLqO5A9tFWsKm7gMwzC0gDeYGMijEsnFmJo4WZdILMeQya5KGThVs7BFPbF9hvjl+lZy/DOqB7mf7zxR5OzWMau7GoAAA5CpJFm9VY8ay2wPI1GW2K3mSSA5LAnYEZpnwu2j/AMwjkZ8sjgop/uUEZPwoOSWwXWw7i5Fv20COB20iiTu/hA8PLNV2MTTZa01ySRsFwOtG8St4Lm/SeZn1zkPHAm2fb4UBc8XSzZLeGJVVDgx6sp5sccz5UkZ2qRlO+hjPYxwxKz6DeEd2NeWaFlWW5hiEkgIUaEVBjTjy99ekvbbiEXdEkc6rqjkQ6lY55EcxVxtDLFFdBGZs6XjU7+t6aLrTDdaYZxWN4eBWCqmI8sdR6mgeGyTQzRSQvodSd26jr8qMlupLvhIsw+Vjk7RVx+GlyTJE2IQH/tyPA8/l86CacWkGLuLDp8skV7A5+5TKfasnVWoizu0LRpHFGzMdIXRmoWzRRs0PdW2uF7wc7K3Rvj+dXWMfZXCs6YKSBM8t/Kt8vBOzKSUdh9rbW0UclzLbRLpTAZ1GxIwKtlXEEcVtbRKkyZZ3AXI6L767fxxy9jHJIvZibU6j+49F/Laggs1z
eukQXLKSrEHC/X9KlCXNXInBqbtkI7K2ignSU6VfOnByAfLyqjhkFlFa8T/zEh9VnIlsVTVpkxs3l7fOp3Eum3keRwUD6V/1E9WoZ7gOsobsVYxlFRE+ZPWlTu6BJrdFb/8AmBZz3hY25te8itglvb0J7tHfZyNZb+3u3X+kCEYMudSg8/Pag2iiuLL7v2gEzuJu5y0ciPXhRXDbjt7vdNFtFGRGHHQjGfbvzpJfVO/RCXWhw9hcrcpczH7ugJBZufLpjn5VfHM0TJxQPMsEZKrpXS7MOYA/CM9fDNdvrR7mxh4jDcOwC6lR8KGByG26EMAMVTFbSSG3tpJUaSIFoTGQUZWwWUkes1zVxHa/CcXEre8jjuNEaZleKWNNWx2KsDnfIO/splxC0e14bF2eCdRDN13XK0k/y+Gy4gR2pVEkXuZwckePjT3jF0IoeGWr9ti6iOMLt2nQfEcvOg0m3oRqzOGee0WOZ2eRTgpp5ksPDxop7T+tHc47MOCoRhghvZ5Cg5ke64WrW8jt2T6CEjbBU/7hy38auitntODywuSCi68kc8nfzPuoNNJP2F7VnIrNf8yBOQioDEwyVRtQ5+XM+2ruKzrccTH3iEzZCOVzjJGw3FEW8hg4C8pVZBKRGi5wQwAPx5bVRFIbi2juXgQuuzMRhI+o5eO/nTrVMwFxe8SO+iVIShjAZ9B2Ax3sD34PtpHLruuJShiVWO6wukbIu5PurSSWkF3MXmeOEyz6e0Ybsu5OkDkM4+FBcUsYllVLTtMtcdnMQeTZx8/M1SDDFoA4MyPwLiMSorhMtu34jppVx61iez4YlrDL2zKzyNrLd1mwi46bD50/4IEW9vYHha2ctqWIxnlvzBqg30C3sqnZ7d0TlzRef61fG6yUNF/ZgHAuEcStONW/EUEcSq3/AE2OSyjmMUxk/pT3iy6izsT2RGMDNUX3G4ZpmWBXUfh2X9aFurx+IXiPKJDOqnEgP4iMbsPHau9uKVIulZyB/us7Q3MOFYlWJ5+6mFyq8OMbo+EZcqwX+2h7hjerqiXWdAc4Oce2uW1795uo7a+lnAiXSuQO6KWSspFjWBriRB92nUK/Mnu5/iibmSOSQWc0ZZwd9OMH3/rU+FyRLKyK6tERgnAB/Y0yhNnCjO0PZlTntBuW+NKpeg+xVd8LaHh6okSFNWeePf7aQNaSTTDEZMj/AIhWrm4sJmIhhklc76SNj5k1Lh8FtdxzSTIsU0MioAGyMt51nJIz0ZDhzywcSMbEK6bEZ/EtaGwl0zlGcIDurAczSS9tE/zWZ1kxNFIRgLs3szVlnfDW0Uir2kLaXDr0z+VBPdoyYzu7VlmdC0bZw6uu450qvreS8mRQMRrsB0AoqXiS9powVf8AC2eRqNjJkltLFF7uhtsedJftgMxxhHS6WNNo41xHjr41fw+9fVHqK5XxptxrhzvbmSNArLlkx1pNBbw30faRMI51/EOntqla0OujSW112YkkYZwvLlmiPvYWyYtHl3XSinmKFvFFtFELl1EYTcryzilpux2Mc25Ukrhv7QKlJ8qFU7K4o2lndZz/AF+lXrFdQ30c/ZAaDtkc6uWVCRcFAM7gipLcyXQyO8qjl4U3yMdwi0NWjhsrkXUbBUm/qKq7aWqriU8ZH+ZSHBC6YRj+7HP3b1TGsU1npk0syHKqD0NFXNrZ8Uhht9YiEYyozzrn5tzJaugLg8rNAYpnY6t8+Vdm4rcQzBIpn0rt5VKJILO7eF5cKqABkqubhbyL2ttKsiMc5bun4U8mn2UtXTG3+fXzWUMjqrDV0GaEmeC81ySxvlsEYXTvVSXclnbxxvBpAHPFFXE/aWFtBrR3kbUzqN0G56e6k58Y6INUrQui1xfeNeBGsZUKT40K1ubxkQ4jGwDE7c6fJGI7efWqOWGktjc0HacJ7K7Wec6bZe+pZgT7l+tIsnJuTNGatsaSyQPGlirAgR4A6aqy/wB6ngdojHgBtPOnr3AjuH+72ile62tzkCh+MJDd2js0ECOzZeYc1psbSVNjQaixTDGLoh2wwB77E8hRP3WFbppER1XOUzgny3pCbvsx2duwaFf9XMnxNERcQa5kSIpuDzHLFVafbKtjDidrchIzHrZpm74zkADpUIbCCKP+vJqmYjnyUe6i2vvvdk0QkGIyP7vA70D98WKy7UthllIDAbkfnSwdLYkZUtld0gGhAHGdgTsPYB+tBcXYCK3tIAXkU6WYbnPUD41fdcTNnMjmDE4G5lOSvu8aL4ebm4VJVIxksxRQMjnRba2yWSfsCvYynDreaR9Dxr2bjmc9OXlV/DbqFpVvEaTRHtPkY1UPGb6JpPvkjRx3Kkxg4ySORx09pq+3kMM1vYSxq0TnVOSNix3Ce7881pL6tE5SdUT+19lre0urePPapkgUu4rGlkllrCSyFMuh5DvE7/GtdxFlubHsLdUR4XESqw2Y4zt4Uu4naWEssTTrI6xuIJQnRtO3uqeHI6SfonCb0mUy8MNzPrDCSLsda+EZxnSaU3luiX0Nu/e0xAlc89yQPftW0tODqb24mtrkGB4SujrkDHx2pEvDSL4sjsL0dlHEunOBjvN7qMcqboaOS9AFzbfeJgkcZmkikPbzAc2wO4vkKK4RcIl6gRoGn7Nlk9urYD4j4U2ks4eHq1nAXIYs0kmMEt5Uu4fwPsJHnEervZTPXn8OdD5U00wOSqji2rLJDEVKXDayCTns1/0il0/DEjsJpjs1uACTvqJP71puE28892jXcLa0lwz8vR5CocStFuOH3MRiWOd5cFQM6iv8VOOVxlQI5HFme0SWq2qzyv2FxGGBHIePvo62BS6Yv/UVh03z4OPyIoninDlh+zUAucCN5NEZJ7ybZBPzBqvgds8Fui3Ualw+mPDA5XFWeS1yC52rBogsFx93l5yZAB3DCruGWN2I7t0VWitVD3AZsE55Y/OhftBFNZ8UEv4YkwwIPL2edPYpWueC3HY93tzlyD+IheVbnX2/R+db/QSykhvXlhlGkHGgDnmmFpdQfcNTgSrauS+eZKjApBw2VVvYta8z2eodSPXOjA8UPDUVc6Lucyu3XT4e0mhP7Ohcm3RoLm5t/wDyzRDJuB2oV+YPOqo1hhuu2jzqdtH/AKR19nsoK+iSTi9tMxKJbx7Y2Hn/ABU7qaW04lC6KDbv+M567YNRi6mqZKIHx2F3swXGHSYg45adX7UuS1nmh4msWWYIhP8AtywH61qeJSRlZEGALgDnj2+jQVsFt4LzLRoLm2b+o3Rlw36U0Z7aHjKxEVZeKC4iZY4beHTl84yowBtR9tK8P3mSV+0HZgxsf9J5D9qAszLccTEelewQFjt06+2vX90zWQSI6g7frRkuVRGkr0OLa5ee3VfvvY20hwIymrP92k789udS4VcXEgurlZon4dGjEK0ZQyMOQGR48/fUhFCUXV+KaRXXK50nkdqB4vLFYr/l1uuoa9ZcHfpuOvOpwny+oyk3of8ADbhry0g7QwrDFmWVFUvpHjnlgbDGelM7m4tZF4fqdmCsXiZx16YyfPnWetrhUsI
YbWR2muO7r0aS2nmf3ppG7Hg8BcYXDJLhc4bP4hn8q58jalZJ3dlYVbS9+7iQHEwul0EYBI35eX5VdxHiDr2OEWSOQOC+nZABuFPP65pfYNHNxAwAvapbg6yu8cozt/8AcfjTp+31xwz5ls9u0iOMeRBxsdgM/GqWuWwlVjeQtYCOaaO0iVWdkPebHXC/3CgDxCYcKuhapmBbiGQAMCxXS4b379KLn4bLc20vdSAAgQPKAMkHO/hXrWzjS1MkK2uqXvdxtQVupAB8+XnQU4rZq0K+ITvFIERVdNo9CDBydyfLrTC4cP2bTuUgkhZTq/AW6a/lvzG9HWtgkNrNcSIVKrnKjUfHfy86W/aC3aTXbrrZpY0lQu2QGI+XOjB8qa6Al7JWcF5/mhhulhZViAUBc6McgGG3zrOcWtr43M1wE7RQwVzHHiQryJPiPZWp4DHMkEHbW6RFFZBpG+kY5+PXahOO3MUv2hht0R9SrowPA5zv1G+fbVMc18jQE/toS/ZKacy/dpbW2uNOdUkq5K5/D7eRqjiv2ib7lNngltaSSNiOSInVs3Ly5U5SE2FnM0+000mElXZ2UHY+ftqHFuGdpxaO8lhWTh4BeQJ//d5gbk8qvDyXza9FFP7CPgyPBYQ3dsCXmR1lydjv8jULnh97IzIVX+l30CsO0x10jr7KjNa3tpZmGFXhBlZVWQbuN9xTninCb+54bw+6+8LI0MYJmTukdPlVvmr2WUyrhfE5HtHhmRZQB/1NGCMeNaLh5iueHSSXTK8EaYC/7vXWl3DoI7RWszL2t52TO8vLH70sj4k/DbFZYiGZpDpIGoHFS5vloLm/QbccVtkkRlmmOgkDtE3Ippw6+sm+zfEdY7JWukClcsWBArPxC34veIXuEaVjyRce6rb6f7vJ9ysbYW5ifL4OrWw6mqyauvY72ih57S1k7EO7NI+vPIkZ6eJptxHhVrawveWCtdrMmlldcGLI399LLSCGTiLJfAXEusmIdEB/ubH5UxbiIsOJNaLbkI/OQn8TY6eVQcnaUSLk70Ibq2uLe0DSaRnupnYkeyjLSf71w4ap0D2eBJnmUPL4HaldjiXjgu+0Z7Ryzydr/Z5Vbw8RyfaGa2t1GiQHO34hjmPH9qo5NafoPP8AR1Z8SF9ILPtNR16QARt0zSHiPA57PioNuMKGwcHFE3Q4fwqZGs1kaZnaJ+9+FvZ4Vbh4rayExd7m4lLRuOi+JovLSoPyV0D/AGk4gr3K2TDQscY3HjRs8EEXDeFwTIO1kjJLKMHypfxzgSw3wldmlLEMxDfiGOXtoz7TXS21xw+2+9PGqRqSg/DU001FREclpI5czR29nYwyRaUkZtR6geNU2cM8HEZ9EqGBVOXDAhh0qvj5mLW0qJr7OLUdIyu/Ko23apFbx3UJiM0mo5IXbwxzoxlcLGWR8RnwSG8W7ugYo2g7FwTjr0rkllNBwXVNKI7hn/GTtj1ij7S4nt450UhcZRQu+9WcS4fcX9siOY0mxnDtgedc3zfbZP5Xy2ZSw4n2E80McAllY4Wd8t8KX393ePcgTNI3gCeVa7g3Bks5JrmaTVpGVUL1pLdQXTXjRWtupuJCSSzDOPfXVHLFypFflVhNte8QEEUfad1sHS++aN4dcLe8R0SJGCo3YHFW8PtzHw+ITaVmZu9nqBRPDODyzQ3i6Y4FbZZGbORXM5Rdoi8ieihb1FvpYluEmZzjSn4V9leiuHa4lJmZiCVIY/AUjmhtrDjMUVjHczSROO2mcYHuHStA0f8A/kGcqvZlC2fMUMkVBiZFT0Lb69uZWudQOlMAe2hIXkkjltX3SX+8eOPjR6t9+tZy6aLcgjIPXNI7XhT/AHuKWG3bsklVe0Rs5361fHKPGmi8ZXGgngvAY+Iy3NjLI8VypBjlHIgcxj2UdafZ+OC8u55WZLeFWC6juPbTz7lLZXr3qxyO7sNESALy8fDereOWt5fcIZYrZ0kuGUTCPp4+6pvM26T7JvK+jIcBtFaW4uVbFrGwXU39wLc/hvVF42iW8uWZmh1BoAfHOw9lOrngUx4XPDa4RJJUyXOkaVB2HjREHAJ+IXFnbQpGyQZEhZ1I5f3dBnaqc03ZuduzBlLq6k7Vss8hy0kh5n31q7G4fh1hIcO7/wDSDOcqx/2iuW/BnWS7jvHBks01pGD+J2pjY8Ljn4ZGjwtJ2MgIy3dD88GjlyJpL0bI9FNxNNJxN42gjlghiQ6nXct0x4+ymi8PiuRazTxYmiOsp4HzoM29zFxZbmQ4jVe073VuQA9gFHcKuWnmfWhDt3jluprnyN1o55PQLHbhuINc5OgPqC9NeMZ91BWlwnEZZ7e6V8u7aTjYsoyDTWOEpbTHIjKuW5cqruEa3i1x7alzkcjtSRnRlJk+AdrDYwC6kPayEoFXpvnNHz6LWUmdO+O6JAMlfOhrCSKW4tZe4zxxnWR0c86Oiufvdw6svcBIpcruRnqQnvIXhu+1ncFAo3J3NWm4ZFyM6QN/dyxUeJWP3q6gtlUpAzcx0oVoJYIZbV2y6OQrZ3IHI0ErSbGavbGHC0W1up9iFI1E89THc1MOtyxlLY3zuBtVFrKsVmqk6umrxqSgkaQN2OxFC92I9sF4+TdwJaKhaJ1Uso2yNWQfI0Jw028cUdukT6U/CXycez6UxkSaUwRRRrgEqxb1uKOe2FvFEioCFbJbxNU5VHiNy+tGY45ZG/jhdpGTS2G7vQ9Kt4cYURrYs/N373d0rTC6lQFpHbQATgNy1UHDbSSRPLLvMYtGPKqKVwpjr+tCyZIcRCCLAjuNETA+KnP6VobG0jSO2iZVeOFMZxkZFI0hSI6JThFOvA5aulWcNu5IIZUbZckDHjTSbcdBa0S48bl2iRO8k8hVtO+3T+adoxW5mR+Zwo1dDjn7aACvOkbrswPSuszNdypMPxyKVPLapOVxSAugjiFkbmKMI+TG2pQP0oa/ttNu9tKSEmAYHxxTjUBKoYdP4oW+EizHLI0JXCMeaP5UsJOzR7EcFiLIvOGZnnGSM7KM0tilllaRZUjAmLGLP9pXly8eXvrQyxFkdN8FFj28KBS0dkhUorGJiuojGV8fyq8Z+2Ope2NobO4iuUkupgS+CnRccl9eVen4fZ3g7aWVfvKgoRpdcb8/byqy9vJG4nFEI0dpE/pHT3YivJceVNOHXEs1oDerEZlJfWqjGPP1tXNycPsB62LA0E8kK3MOmFT2UeD0A6Y8TTBownCbMRIIoix3lbY78v3ogpZXgF0XjtLxfxNjIbAwCG5r7RVa8MF0AwkQup1Kx3GfH9qWclpmk+iqyP3aaWKXLAgFUXB7x88jFFNNbQA627KKEFm7R8nOegx78edLjbXSXLSrIjK577KPw45EeVWRQi64h94nVJo0XA1f6h1JpU0KFssd1I8zIs6oMxo8jKvswozXpLCKQrN2AhlZQHjExAHs86IhiCxxPGVZW56KreIx4LynCjVn/V5ftQU5VxDyfQRPbrc2KLHOy6MAAf3DqprKcQc3PGbopbRvJNIQqMrHQDnp1ArRTS3EFzAuzR
yKqBjyxjFHQlDqlSNe11aQSNx76MMjggp0AWMD2zRo+vScY193HdHh7KW8Rlif7SiLs3imG4ccn8Qae3TSNeQaZBojByAOvrpXXt0u/wCqFDSRDKkb4H6ilU6bZNdlUsMVxJHLLGNUWyY8Tz2/SlK2Ub8WmR5mCyyrcIoY4BX9DTuK37aNdPjuT4+dTMcPDtQd1/Fu43NaM5JGQn4ybeN0nmQMVbbI31fWirO5t7pJYJUVos6tSctXX3VbN/l/fTDXEpG2fwr13FdDxWNs9w+jsgn9p6/SqU+KXsdIy/HJbuS7u2tLaOKBoQpkJxq8qVR2rL9m5WeMv2ZLbbe01rLxmm4YsscfaGVsEdV86NuPs8j/AGbigeZxG28oRd5N86f2q8MnSY6bZ8ht7pLO/Wa0Ltp3BYY38q1kF797iTiKQBpWXvaea+dRvfs/wuHiBneeSGNZVSKEjIIxzHlmtHwvhlpw+4jlV5FhmGjRKBnJ65/KujLkhotJ6swV7cQ57G1vJI5nky4K7s3/AKhWqhke74bBEtz2lxFtufxYpLf/AGWmj43MLY6gHJyw5NmnfDeFtw+1W4vJB2kSkggZ260maceK4sSbVWmXS8LtL6wmjdyrMuGZdgtJOHcMfh/FLdJSH0KdM6brp8M1onKtZydmyvDOuSDyIrnDbXRbG3nMc0ZBCFWyEHhUFklTVkuTpg6cARLqa+uArrKMuM4r3HOFNJecOudAWCIYCkEYH1pyz9pGIWTMSYINee5+8XEiDv4bCjVnYVP5Zdipy9iG94b2yx61btA2pUMh5Z6mucW4GnEuJxySLEnZhRqeTAbyxTmdFV4+7qZtmGndaz00lxbXzx3KuElk/phznT5j6U+Kck7Q8bsLkidrSS3V2TUNLGEDBHlmkEX3Sbjqdu7MV7qllyF251poNdvCXm04UZ8j+1THB7bi9mZraKGK4yGlULgSfSnxSq0GH4JbtZbe0lNrK8rdsveHdLezyo97C84n2TxqZEYDtQTnHxpoll2hjQ6cR74xsp8KnLrsrRooGIdh+Ll8Kk8gjYotuHm3t5YEkkXB23o6wFtEZrgr/W/ArNyx5VTHcuCtq5y+nnUbeJ5brQ1rup1Bs4HwoW3diK22U8YtdU8LtJL3cAJHzO9PbUYgRRpHj5GglkSW4ljeMkjyohzFHbqO27ORzk5Gdq1XURoxvRRNw2Rop/xSaiCpAwxqb8NIkjUSan06nXy8KZtdwx8PScamxjJrsM1v+OLAMg5itJtdg77FR4TbwWZa7XWzONESnAWho7T7rJHHZWpAZhkj+0++nS2SjUzS6mZs5oq3EcEuj8Tv8hQjN2N7KBbiCY5fU7AbeFWPODF2QXLk869xgJbov3dtUp3Y0NaOrASY3xzpZP2hJ6kWX5d7H+pCGRmCg43BxQKWsVvZG3TuhQC4Ubt4UWjPJIw1PpzyztVscCzzHcspAJONh4VlJ9B5AXD+Hxx8NvLoxF+6QQ2NfLwoHh0CXUEdxb26RjJYIXIBbxNPJYmgVzGWLHPI1G2jitoANiW5notW+VNGc01SM/f2sksvaMSOyP4PGqLVkhZH33bPPmKb3sYW6ZmyQRnFLTbtOQQhGltsULtErvQdM+XwoyW51O5t+2t4w/dC1YNMMiF8aevtq8lJu+RlE8Kj0boXx2ENnDGyyquSdh/dRESCN8889cUuubeSa/WXOY15eVGRtMjMBvv7KZu0M6L7iMsEYDBXLZ60qsLR7y6mDM39Ru6Dz/amPaSAjP4T16V77yIVYRhdfjjpQTpUNFr2SbhCWUZZ3ByO6oGwoDTnQ6Dc1ezieHtJXZnB8a4W/pR9N+Q55+tE0qfRVJK1tiUoW0+G1Me1F1ahgNMfP2eNDhe2hK6dhzGM0M5ZY5rdDzwMU0XoVbFrMhDa0MgVuY3oG84w0VxKqRgKi6QeppnJEljbqus6jvvSsRmcSyFVK884q8KKqjqk39tIyRjtVXZRzJqpUaJbeGRcOBnA5edUW97NFI/cCI3IjrTO3m/8xumSw5n603FrQUqGPC3S6DxKNBA/E3X141b2SF01fiVufjVMSNawws+7vnJArzXHaszDZUGyiueSt6Fe7oukJa6168Z2Cj18qq4hCZ40CYwOYpdeXkaSoruysTkeNXx3jSsqqcjPWjxemCn2HzW8sSx63LFUAHjmgxHO0KQqh1aj/UPjRn3xPvEascnlkURdXCOq6cdwZPs+lMtDJ0CytPbI9zLICGwqALkKCPH9aIt5DaWKO0RWT8O25omVFjggRGCA7YxkeyrFtw0AjcglVwrD8JH0rn58uwyqiktG7oYdIVjyQHl66UU8afdlETEMo58vdQhvBGrQwKgZSFYsd8/WmKzj7opU6pG5kDmfXShPoWXRK31nQrJp01TMGRnJRVDHmo5+P8VZBdpqC5wcb59cq5dzFlYqcjrSrsmSwiwxx2wVQRkmr42L2b9ogK+J+R/elcUpkUZ3A6jYrUpL6OG7WGNy8WMYxnf10pqvoo9hUzdtFAiyaLmE5wp6/WqredxdNGzEFenQ1CeNtpOg7wNdhmW7UsCQw5/ShWgOSovlkS4DOhBZDhhnf3/WrIS8c8cjvpZ10lV6H9DS9IpPvDOjYwd1xRodSw2wxHSgkCOmT/zB42W2iOV1ZAxyNWXM4gKytH2hU5A5j+KEhgSK9ebOVO+fCrJZo5O6xYjwGx/mmemmPLTQNbStfXAQhBLLJ3lzinF7w+ztmEOdWn8QffOfEfpSPhNraWfHjfszSSf25PdB9lGy3VxPfyyXbrHGT3Ex06b1dSio/wClV1Y4nS3htUljRCVUd1F2x448KiL9JDFGzBQp1Pq5Mvh+9JrRpf8AMyUeRueR0FRtOIGaNUWEswkKuTzG/OhuuSMlYZNaQ3jTPJCrx6soCPlQccQ4gjwrjsY2xpK9R4U2FzKkTxAIEK4Vhvt7KDsw8QZUCgn8XSouVgb1oplRUOp3GMbseZNKpbgyyBIWyX6SDb+ac3WlQTsWJ/CR6+FIeJXFx99s4LW2xCBnWnT9qbFsSNWd0PJKIZZi8mNONOn4UdZWP3G3l7hACbDmTXrmKTQr9mcrjvdcVYeJyEIjA5558qNuzdsOtLSN7fSsx1PuySc80MLZOHXStGqLjIIPMUxtoI5IVdco4ztnOasmZXws2l26ORvQcgv/AEWs8Szo3aA/6hVq2NvPcJ20Xao3JsZ059c6En4aXkLo5A8KJtQ6MuhyMHlSKVbFUq2KLnhtzbcdktY0L2xXuZ338PbR1vAOH3aR6jkrhschTlbcvOJe0wcY3oGW2YzSO76gTzqjnqxpNVaKuyT73/TbIOWIFCLbPfcRdWOmCMj30RA0cF53QcedWzQf1HdXILbbfOlUkKqSFfErW1SZbu2OoY73jVf31UXUjgK2+aK7AQuyatQfmT0FDz8Mjmz3SoXlvVdMfTRaEkEqyJgbfGiprcyKXIDE/nUojClusf8Ad416Wd+z7GJBuOdRv7Eq32Dw4NhLCzDqPZQsMsUSJEshLA4JxXbaOWNn6b71XdRJJE6QH
+qarfpjLvYdHdp2/ZhicdaYpcxQ2cmhQZpf7uvlWZtLG5cq8h0sNs0Y8mUMTDONt6F8XoTJS6JMZg+51IRRVu4VDsNuhpe8sgXfOF386KtN41YkkkUr6IP9C8oY9ge91q6JmihRV3xnSBQ6qwOdsc6uD5XSaQaMmRFxthzn2UPNqkVlzsfjV0sfcyp3/WhoFLIeZpl+ga3aBjqSRTIcAdQKuiukVcFSE8qKSJRqZhnO/soT7uzyldwPCnuzXXR2Zllj1YKoor0F3FhYxyPnVpTEJjfJz0x1oJbSODLL7s0B3+h3ZxtpZTgA8hyNWyShI27v9vWqYdSx+zxqqWbXsc+JpBUyUeG77HGKoZVIZs/izyqxyDEVANCJqR9ByRTDIJs7cPNpLZHhU7myZySjBVUVXHL2RzjDnoTVk1yGVsNvzreysaKrNCJezZj5g8/5qEuhJyRv02ob72IpMEnON6lKwMZKnvGmcdizohxKCO6ttWMkchQMNvosxBJzY5wdqsgnZJir509M0VcQJcYl3zy25VRNrTNDTqQou7NnmiihxgcwalA9wLgRGJdIGOWaNjiaMag3dUda8GeNS7/3HO53+NO5+joc4rRK5mMiRRkYVPXo1Vft2VqXRcsTv7audtSB8bctqlJCulVIBGM5xtSJkm09gKWqyxdvPG2sKSM+vlUIhJHbdsU3YcsdKZysjKI8Dl8vXWo3MqiyKqCfZsabnejRl6Zn76VwRpPeGM0fb3KtB2bvlyMevOl0dzCyMpI1Z68q9DIiM8sn/pUVVR1RZws+xt/h3xB0Gqe0JI2y7bf9tXv9hOJBEUXFqdByCZGz/wDbXq9XJxRP0B3X+HPEndXW5tFI7uA7Af8A21R/+nHGYZQYr200nYhpH5D/ANter1MkqF9BY/w+4m8Qf7zaAjc99t/+2oxfYPiwOlrmzI3Ozt/xr1epaQjRdH/h9xJcr94s8NkHvtz/APjyoC9/wy4myARXNmjg5La33/7a9XqeCVjxDU/w/wCKyRKGu7UbDkzf8a5D/hzxO0Pdu7RgSeZb/jXq9QpUZrRYPsLxVTqW4s9x1dv+NSH2D4mxaR7m1LDGMM3/ABr1epFFCxLU+wnELe2kIntSzNtl2wP+35Unb/D/AI/KXH3ywVCCMB3/AONer1WaVIt7DrP7CcSiu0JntCjKMr2jH/8AGmcv2Hvi347MgcgWb/jXq9SOKCweH7BcQizJHNaqxOk/1XP/AOPKuv8AYS/kQES2iFTk6Wbf/tr1erUTXZ2T7E8RERVZrXI3BLt/xqNt9gL+NS73FsxYY2Zv+Ner1GEI30OipPsPxM3G81noIJ0h26f+2p3H2Cvy2pJrQYOd3b/jXq9U2lyFl2dX7FcUNtgz2e+34m/41BP8PL8umue0Oo/6m2/7a9XqelZktlyfYjiMUjMs1rg741t0/wDbXT9juJknVNaHB/1t/wAa9XqSUVYr7OSfYviUg09raDbH42/41yL7E8Tjz/XtDjc/1G3/AO2vV6mUI/gaCv8AwfxIL3ZrUbZ/G3/Gg5vsVxYggXFmMn/W3/GvV6hSAwL/AMC8XEykXNlj/wBbZ/8Atq+L7C8UadtU9mVxn8bZ/wDtr1eqihGuhktFV99geKlCYriyVg2ASzcv/jVC/wCH/GSgAurLcb5kc/8A416vUIxVDeiT/YLjKlCtxY9Ocj+/+2pv9huLMCPvFn/82/47V6vUeEfwk+yEH+HvF0lJNzZH/wB7/wDGrJf8Ob8yKyzWYzz77f8AGvV6s0qB6Lm+wnFEQgXFnkcjrb/jQh/w+4oW1febPOf9bf8AGvV6kSQrRxv8POKOjIbiyyOR1N/xqUH2A4qiAfebPHP8bf8AGvV6i0qEpBp+w3FFUZuLM5599v8AjVY+wvFRIf8AzFnz/wBbf8a9XqZQj+GomfsNxQrjt7Pf/e3/ABqofYXii5Ans8kAk62/416vUqig0WD7C8UA/wCvZ8yPxt/xqafYXiPPtrTfY99j/wDjXq9W4oVJFU/2C4owGm4sx7Xb/jQr/wCH3FSpH3izx/8A9H/416vUUkNRbF9geLCMI1xZHp+N/wDjUf8AwDxMuCZ7P/5t/wAa9XqDigtKyw/YDiICgTWeP/W3L/41W3+HnE1fP3izzno7D/8AGvV6iooKRV/+nXFnIIubIb4/G/8AxquT/DviuQBcWWN+bN/xr1eo0iiRVP8A4ZcWOrTc2IO2+t/+NWW/+G/FwpV7mxODgYd+v/tr1ep2kZnJP8M+KByy3FiCOffffH/tro/w74yMZubE4wf+o/8Axr1epWlQDk/+HPFW5XNkNwT3m3/7ajL/AIacVZMfebHbxkf/AI16vVq0H0dT/DjiyRKBPYf/ADf/AI11v8POMDc3Fie7n/qPy/8Ajzr1eo0hfQOP8NeMtK+q54fnfcO//Ghk/wAMuPa9Bu+HkEY/6j/8K9XqdJWUXQCP8HeMGVpRd8PBByMSOP8A8Ksj/wAJuNIjl7nhzYOQe0fP/wBler1dD6K3o//Z\n",
|
| 78 |
-
"text/plain": [
|
| 79 |
-
"<IPython.core.display.Image object>"
|
| 80 |
-
]
|
| 81 |
-
},
|
| 82 |
-
"metadata": {},
|
| 83 |
-
"execution_count": 19
|
| 84 |
-
}
|
| 85 |
-
]
|
| 86 |
-
},
|
| 87 |
-
{
|
| 88 |
-
"cell_type": "code",
|
| 89 |
-
"source": [
|
| 90 |
-
"from PIL import Image\n",
|
| 91 |
-
"im = Image.open(\"cat.jpg\").convert(\"RGB\")\n",
|
| 92 |
-
"im = transform(im).unsqueeze(0)\n",
|
| 93 |
-
"\n",
|
| 94 |
-
"with torch.no_grad(), torch.cuda.amp.autocast():\n",
|
| 95 |
-
" generated = model.generate(im)\n",
|
| 96 |
-
"\n",
|
| 97 |
-
"print(open_clip.decode(generated[0]).split(\"<end_of_text>\")[0].replace(\"<start_of_text>\", \"\"))"
|
| 98 |
-
],
|
| 99 |
-
"metadata": {
|
| 100 |
-
"colab": {
|
| 101 |
-
"base_uri": "https://localhost:8080/"
|
| 102 |
-
},
|
| 103 |
-
"id": "byZKXMGzyr5Y",
|
| 104 |
-
"outputId": "122eb099-6704-4e3c-fa7c-a05dd87ce64f"
|
| 105 |
-
},
|
| 106 |
-
"execution_count": 22,
|
| 107 |
-
"outputs": [
|
| 108 |
-
{
|
| 109 |
-
"output_type": "stream",
|
| 110 |
-
"name": "stdout",
|
| 111 |
-
"text": [
|
| 112 |
-
"an orange and white cat on top of a turtle . \n"
|
| 113 |
-
]
|
| 114 |
-
}
|
| 115 |
-
]
|
| 116 |
-
}
|
| 117 |
-
]
|
| 118 |
-
}
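For context, the captioning cell in the deleted notebook can be reproduced outside this Space with the public open_clip API. A minimal sketch follows; the pretrained tag is an assumption, and any available coca_* checkpoint would do:

    import torch
    import open_clip
    from PIL import Image

    # Load a CoCa model plus its preprocessing transform (checkpoint tag assumed).
    model, _, transform = open_clip.create_model_and_transforms(
        "coca_ViT-B-32", pretrained="laion2b_s13b_b90k"
    )
    model.eval()

    # Preprocess a local image into a 1-image batch.
    im = transform(Image.open("cat.jpg").convert("RGB")).unsqueeze(0)

    # Beam-search caption generation (the default generation_type).
    with torch.no_grad():
        generated = model.generate(im)

    # Strip the special tokens from the decoded caption.
    caption = open_clip.decode(generated[0])
    print(caption.split("<end_of_text>")[0].replace("<start_of_text>", ""))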
open_clip/docs/clip_conceptual_captions.md
DELETED
@@ -1,13 +0,0 @@
-## Additional training curves for CLIP on Conceptual Captions
-
-# Zero shot accuracy
-![](/docs/clip_zeroshot.png)
-
-# Training loss curve
-![](/docs/clip_loss.png)
-
-# Validation loss curve
-![](/docs/clip_val_loss.png)
-
-# Validation recall
-![](/docs/clip_recall.png)
open_clip/docs/clip_loss.png
DELETED
Binary file (42.9 kB)

open_clip/docs/clip_recall.png
DELETED
Binary file (50.7 kB)

open_clip/docs/clip_val_loss.png
DELETED
Binary file (43.8 kB)

open_clip/docs/clip_zeroshot.png
DELETED
Binary file (58.5 kB)

open_clip/docs/effective_robustness.png
DELETED
Git LFS Details

open_clip/docs/laion2b_clip_zeroshot_b32.png
DELETED
Binary file (246 kB)

open_clip/docs/laion_clip_zeroshot.png
DELETED
Binary file (195 kB)

open_clip/docs/laion_clip_zeroshot_b16.png
DELETED
Binary file (196 kB)

open_clip/docs/laion_clip_zeroshot_b16_plus_240.png
DELETED
Binary file (255 kB)

open_clip/docs/laion_clip_zeroshot_l14.png
DELETED
Binary file (204 kB)

open_clip/docs/laion_openai_compare_b32.jpg
DELETED
Binary file (59.5 kB)

open_clip/docs/scaling.png
DELETED
Binary file (98.8 kB)
open_clip/docs/script_examples/stability_example.sh
DELETED
@@ -1,60 +0,0 @@
-#!/bin/bash
-#SBATCH --partition=g40423
-#SBATCH --job-name=testopenclip
-#SBATCH --nodes 30
-#SBATCH --ntasks-per-node=8
-#SBATCH --cpus-per-task=12
-#SBATCH --output=%x_%j.out
-#SBATCH --comment=laion
-#SBATCH --open-mode=append
-#SBATCH --exclusive
-
-module load openmpi
-module load cuda/11.7
-
-export MASTER_ADDR=`hostname`
-export MASTER_PORT=12802
-export NCCL_PROTO=simple
-export FI_EFA_FORK_SAFE=1
-export FI_LOG_LEVEL=1
-export FI_EFA_USE_DEVICE_RDMA=1
-export NCCL_DEBUG=info
-
-export PYTHONFAULTHANDLER=1
-
-export CUDA_LAUNCH_BLOCKING=0
-export OMPI_MCA_mtl_base_verbose=1
-export FI_EFA_ENABLE_SHM_TRANSFER=0
-export FI_PROVIDER=efa
-export FI_EFA_TX_MIN_CREDITS=64
-export NCCL_TREE_THRESHOLD=0
-
-cd /admin/home-mitchellw/open_clip/src
-export PYTHONPATH="$PYTHONPATH:/admin/home-mitchellw/open_clip/src"
-
-EXP_NAME="test-B-32-laion5b-lr1e-3-bs90k"
-
-srun --comment laion --cpu_bind=v --accel-bind=gn python -m training.main \
-    --save-frequency 1 \
-    --train-data="pipe:aws s3 cp s3://s-datasets/laion5b/{laion2B-data/{000000..231349}.tar,laion2B-multi-data/{000000..226687}.tar,laion1B-nolang-data/{000000..127231}.tar} -" \
-    --train-num-samples 135646078 \
-    --dataset-type webdataset \
-    --dataset-resampled \
-    --warmup 2000 \
-    --batch-size=375 \
-    --epochs=97 \
-    --lr 1e-3 \
-    --workers=8 \
-    --report-to wandb \
-    --name ${EXP_NAME} \
-    --logs /scratch/logs/ \
-    --model ViT-B-32 \
-    --seed 0 \
-    --ddp-static-graph \
-    --local-loss \
-    --gather-with-grad \
-    --grad-checkpointing \
-    --precision amp_bfloat16 \
-    --wandb-project-name open_clip6 \
-    --resume "latest" \
-    --remote-sync s3://s-laion/mitchellw/logs
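For reference, the script above launches 30 nodes with 8 tasks (GPUs) each at --batch-size=375 per GPU, i.e. a global batch size of 30 x 8 x 375 = 90,000, which matches the bs90k suffix in EXP_NAME.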
open_clip/pytest.ini
DELETED
@@ -1,3 +0,0 @@
-[pytest]
-markers =
-    regression_test
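The regression_test marker registered above gates the slower regression checks: `pytest -m regression_test` selects only those tests, while `pytest -m "not regression_test"` skips them.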
open_clip/requirements-test.txt
DELETED
@@ -1,4 +0,0 @@
-pytest-split==0.8.0
-pytest==7.2.0
-transformers
-timm==0.6.11
open_clip/requirements-training.txt
DELETED
@@ -1,12 +0,0 @@
-torch>=1.9.0
-torchvision
-webdataset>=0.2.5
-regex
-ftfy
-tqdm
-pandas
-braceexpand
-huggingface_hub
-transformers
-timm
-fsspec
open_clip/requirements.txt
DELETED
@@ -1,9 +0,0 @@
-torch>=1.9.0
-torchvision
-regex
-ftfy
-tqdm
-huggingface_hub
-sentencepiece
-protobuf<4
-timm
open_clip/setup.py
DELETED
@@ -1,61 +0,0 @@
-""" Setup
-"""
-from setuptools import setup, find_packages
-from codecs import open
-from os import path
-
-here = path.abspath(path.dirname(__file__))
-
-# Get the long description from the README file
-with open(path.join(here, 'README.md'), encoding='utf-8') as f:
-    long_description = f.read()
-
-def _read_reqs(relpath):
-    fullpath = path.join(path.dirname(__file__), relpath)
-    with open(fullpath) as f:
-        return [s.strip() for s in f.readlines() if (s.strip() and not s.startswith("#"))]
-
-REQUIREMENTS = _read_reqs("requirements.txt")
-TRAINING_REQUIREMENTS = _read_reqs("requirements-training.txt")
-
-exec(open('src/open_clip/version.py').read())
-setup(
-    name='open_clip_torch',
-    version=__version__,
-    description='OpenCLIP',
-    long_description=long_description,
-    long_description_content_type='text/markdown',
-    url='https://github.com/mlfoundations/open_clip',
-    author='',
-    author_email='',
-    classifiers=[
-        # How mature is this project? Common values are
-        #   3 - Alpha
-        #   4 - Beta
-        #   5 - Production/Stable
-        'Development Status :: 3 - Alpha',
-        'Intended Audience :: Education',
-        'Intended Audience :: Science/Research',
-        'License :: OSI Approved :: Apache Software License',
-        'Programming Language :: Python :: 3.7',
-        'Programming Language :: Python :: 3.8',
-        'Programming Language :: Python :: 3.9',
-        'Programming Language :: Python :: 3.10',
-        'Topic :: Scientific/Engineering',
-        'Topic :: Scientific/Engineering :: Artificial Intelligence',
-        'Topic :: Software Development',
-        'Topic :: Software Development :: Libraries',
-        'Topic :: Software Development :: Libraries :: Python Modules',
-    ],
-
-    # Note that this is a string of words separated by whitespace, not a list.
-    keywords='CLIP pretrained',
-    package_dir={'': 'src'},
-    packages=find_packages(where='src'),
-    include_package_data=True,
-    install_requires=REQUIREMENTS,
-    extras_require={
-        "training": TRAINING_REQUIREMENTS,
-    },
-    python_requires='>=3.7',
-)
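Given the extras_require declared above, the training stack is an optional extra of the published package: `pip install open_clip_torch` installs only the inference requirements, while `pip install 'open_clip_torch[training]'` pulls in requirements-training.txt as well.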
open_clip/src/open_clip/__init__.py
DELETED
@@ -1,15 +0,0 @@
-from .coca_model import CoCa
-from .constants import OPENAI_DATASET_MEAN, OPENAI_DATASET_STD
-from .factory import create_model, create_model_and_transforms, create_model_from_pretrained, get_tokenizer, create_loss
-from .factory import list_models, add_model_config, get_model_config, load_checkpoint
-from .loss import ClipLoss, DistillClipLoss, CoCaLoss
-from .model import CLIP, CustomTextCLIP, CLIPTextCfg, CLIPVisionCfg, \
-    convert_weights_to_lp, convert_weights_to_fp16, trace_model, get_cast_dtype
-from .openai import load_openai_model, list_openai_models
-from .pretrained import list_pretrained, list_pretrained_models_by_tag, list_pretrained_tags_by_model, \
-    get_pretrained_url, download_pretrained_from_url, is_pretrained_cfg, get_pretrained_cfg, download_pretrained
-from .push_to_hf_hub import push_pretrained_to_hf_hub, push_to_hf_hub
-from .tokenizer import SimpleTokenizer, tokenize, decode
-from .transform import image_transform, AugmentationCfg
-from .zero_shot_classifier import build_zero_shot_classifier, build_zero_shot_classifier_legacy
-from .zero_shot_metadata import OPENAI_IMAGENET_TEMPLATES, SIMPLE_IMAGENET_TEMPLATES, IMAGENET_CLASSNAMES
open_clip/src/open_clip/bpe_simple_vocab_16e6.txt.gz
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:924691ac288e54409236115652ad4aa250f48203de50a9e4722a6ecd48d6804a
-size 1356917
open_clip/src/open_clip/coca_model.py
DELETED
|
@@ -1,458 +0,0 @@
|
|
| 1 |
-
from typing import Optional
|
| 2 |
-
|
| 3 |
-
import torch
|
| 4 |
-
from torch import nn
|
| 5 |
-
from torch.nn import functional as F
|
| 6 |
-
import numpy as np
|
| 7 |
-
from dataclasses import dataclass
|
| 8 |
-
|
| 9 |
-
from .transformer import (
|
| 10 |
-
LayerNormFp32,
|
| 11 |
-
LayerNorm,
|
| 12 |
-
QuickGELU,
|
| 13 |
-
MultimodalTransformer,
|
| 14 |
-
)
|
| 15 |
-
from .model import CLIPTextCfg, CLIPVisionCfg, _build_vision_tower, _build_text_tower
|
| 16 |
-
|
| 17 |
-
try:
|
| 18 |
-
from transformers import (
|
| 19 |
-
BeamSearchScorer,
|
| 20 |
-
LogitsProcessorList,
|
| 21 |
-
TopPLogitsWarper,
|
| 22 |
-
TopKLogitsWarper,
|
| 23 |
-
RepetitionPenaltyLogitsProcessor,
|
| 24 |
-
MinLengthLogitsProcessor,
|
| 25 |
-
MaxLengthCriteria,
|
| 26 |
-
StoppingCriteriaList
|
| 27 |
-
)
|
| 28 |
-
|
| 29 |
-
GENERATION_TYPES = {
|
| 30 |
-
"top_k": TopKLogitsWarper,
|
| 31 |
-
"top_p": TopPLogitsWarper,
|
| 32 |
-
"beam_search": "beam_search"
|
| 33 |
-
}
|
| 34 |
-
_has_transformers = True
|
| 35 |
-
except ImportError as e:
|
| 36 |
-
GENERATION_TYPES = {
|
| 37 |
-
"top_k": None,
|
| 38 |
-
"top_p": None,
|
| 39 |
-
"beam_search": "beam_search"
|
| 40 |
-
}
|
| 41 |
-
_has_transformers = False
|
| 42 |
-
|
| 43 |
-
|
| 44 |
-
@dataclass
|
| 45 |
-
class MultimodalCfg(CLIPTextCfg):
|
| 46 |
-
mlp_ratio: int = 4
|
| 47 |
-
dim_head: int = 64
|
| 48 |
-
heads: int = 8
|
| 49 |
-
n_queries: int = 256
|
| 50 |
-
attn_pooler_heads: int = 8
|
| 51 |
-
|
| 52 |
-
|
| 53 |
-
def _build_text_decoder_tower(
|
| 54 |
-
embed_dim,
|
| 55 |
-
multimodal_cfg,
|
| 56 |
-
quick_gelu: bool = False,
|
| 57 |
-
cast_dtype: Optional[torch.dtype] = None,
|
| 58 |
-
):
|
| 59 |
-
multimodal_cfg = MultimodalCfg(**multimodal_cfg) if isinstance(multimodal_cfg, dict) else multimodal_cfg
|
| 60 |
-
act_layer = QuickGELU if quick_gelu else nn.GELU
|
| 61 |
-
norm_layer = (
|
| 62 |
-
LayerNormFp32 if cast_dtype in (torch.float16, torch.bfloat16) else LayerNorm
|
| 63 |
-
)
|
| 64 |
-
|
| 65 |
-
decoder = MultimodalTransformer(
|
| 66 |
-
context_length=multimodal_cfg.context_length,
|
| 67 |
-
width=multimodal_cfg.width,
|
| 68 |
-
heads=multimodal_cfg.heads,
|
| 69 |
-
layers=multimodal_cfg.layers,
|
| 70 |
-
ls_init_value=multimodal_cfg.ls_init_value,
|
| 71 |
-
output_dim=embed_dim,
|
| 72 |
-
act_layer=act_layer,
|
| 73 |
-
norm_layer=norm_layer,
|
| 74 |
-
)
|
| 75 |
-
|
| 76 |
-
return decoder
|
| 77 |
-
|
| 78 |
-
|
| 79 |
-
class CoCa(nn.Module):
|
| 80 |
-
def __init__(
|
| 81 |
-
self,
|
| 82 |
-
embed_dim,
|
| 83 |
-
multimodal_cfg: MultimodalCfg,
|
| 84 |
-
text_cfg: CLIPTextCfg,
|
| 85 |
-
vision_cfg: CLIPVisionCfg,
|
| 86 |
-
quick_gelu: bool = False,
|
| 87 |
-
cast_dtype: Optional[torch.dtype] = None,
|
| 88 |
-
pad_id: int = 0,
|
| 89 |
-
):
|
| 90 |
-
super().__init__()
|
| 91 |
-
multimodal_cfg = MultimodalCfg(**multimodal_cfg) if isinstance(multimodal_cfg, dict) else multimodal_cfg
|
| 92 |
-
text_cfg = CLIPTextCfg(**text_cfg) if isinstance(text_cfg, dict) else text_cfg
|
| 93 |
-
vision_cfg = CLIPVisionCfg(**vision_cfg) if isinstance(vision_cfg, dict) else vision_cfg
|
| 94 |
-
|
| 95 |
-
self.text = _build_text_tower(
|
| 96 |
-
embed_dim=embed_dim,
|
| 97 |
-
text_cfg=text_cfg,
|
| 98 |
-
quick_gelu=quick_gelu,
|
| 99 |
-
cast_dtype=cast_dtype,
|
| 100 |
-
)
|
| 101 |
-
|
| 102 |
-
vocab_size = (
|
| 103 |
-
text_cfg.vocab_size # for hf models
|
| 104 |
-
if hasattr(text_cfg, "hf_model_name") and text_cfg.hf_model_name is not None
|
| 105 |
-
else text_cfg.vocab_size
|
| 106 |
-
)
|
| 107 |
-
|
| 108 |
-
self.visual = _build_vision_tower(
|
| 109 |
-
embed_dim=embed_dim,
|
| 110 |
-
vision_cfg=vision_cfg,
|
| 111 |
-
quick_gelu=quick_gelu,
|
| 112 |
-
cast_dtype=cast_dtype,
|
| 113 |
-
)
|
| 114 |
-
|
| 115 |
-
self.text_decoder = _build_text_decoder_tower(
|
| 116 |
-
vocab_size,
|
| 117 |
-
multimodal_cfg=multimodal_cfg,
|
| 118 |
-
quick_gelu=quick_gelu,
|
| 119 |
-
cast_dtype=cast_dtype,
|
| 120 |
-
)
|
| 121 |
-
|
| 122 |
-
self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07))
|
| 123 |
-
self.pad_id = pad_id
|
| 124 |
-
|
| 125 |
-
@torch.jit.ignore
|
| 126 |
-
def set_grad_checkpointing(self, enable=True):
|
| 127 |
-
self.visual.set_grad_checkpointing(enable)
|
| 128 |
-
self.text.set_grad_checkpointing(enable)
|
| 129 |
-
self.text_decoder.set_grad_checkpointing(enable)
|
| 130 |
-
|
| 131 |
-
def _encode_image(self, images, normalize=True):
|
| 132 |
-
image_latent, tokens_embs = self.visual(images)
|
| 133 |
-
image_latent = F.normalize(image_latent, dim=-1) if normalize else image_latent
|
| 134 |
-
return image_latent, tokens_embs
|
| 135 |
-
|
| 136 |
-
def _encode_text(self, text, normalize=True, embed_cls=True):
|
| 137 |
-
text = text[:, :-1] if embed_cls else text # make space for CLS token
|
| 138 |
-
text_latent, token_emb = self.text(text)
|
| 139 |
-
text_latent = F.normalize(text_latent, dim=-1) if normalize else text_latent
|
| 140 |
-
return text_latent, token_emb
|
| 141 |
-
|
| 142 |
-
def encode_image(self, images, normalize=True):
|
| 143 |
-
image_latent, _ = self._encode_image(images, normalize=normalize)
|
| 144 |
-
return image_latent
|
| 145 |
-
|
| 146 |
-
def encode_text(self, text, normalize=True, embed_cls=True):
|
| 147 |
-
text_latent, _ = self._encode_text(text, normalize=normalize, embed_cls=embed_cls)
|
| 148 |
-
return text_latent
|
| 149 |
-
|
| 150 |
-
def forward(self, image, text, embed_cls=True, image_latent=None, image_embs=None):
|
| 151 |
-
text_latent, token_embs = self._encode_text(text, embed_cls=embed_cls)
|
| 152 |
-
if image_latent is None or image_embs is None:
|
| 153 |
-
image_latent, image_embs = self._encode_image(image)
|
| 154 |
-
|
| 155 |
-
# TODO: add assertion to avoid bugs?
|
| 156 |
-
labels = text[:, -token_embs.shape[1]:]
|
| 157 |
-
|
| 158 |
-
logits = self.text_decoder(image_embs, token_embs)
|
| 159 |
-
return {
|
| 160 |
-
"image_features": image_latent,
|
| 161 |
-
"text_features": text_latent,
|
| 162 |
-
"logits": logits,
|
| 163 |
-
"labels": labels,
|
| 164 |
-
"logit_scale": self.logit_scale.exp()
|
| 165 |
-
}
|
| 166 |
-
|
| 167 |
-
def generate(
|
| 168 |
-
self,
|
| 169 |
-
image,
|
| 170 |
-
text=None,
|
| 171 |
-
seq_len=30,
|
| 172 |
-
max_seq_len=77,
|
| 173 |
-
temperature=1.,
|
| 174 |
-
generation_type="beam_search",
|
| 175 |
-
top_p=0.1, # keep tokens in the 1 - top_p quantile
|
| 176 |
-
top_k=1, # keeps the top_k most probable tokens
|
| 177 |
-
pad_token_id=None,
|
| 178 |
-
eos_token_id=None,
|
| 179 |
-
sot_token_id=None,
|
| 180 |
-
num_beams=6,
|
| 181 |
-
num_beam_groups=3,
|
| 182 |
-
min_seq_len=5,
|
| 183 |
-
stopping_criteria=None,
|
| 184 |
-
repetition_penalty=1.0,
|
| 185 |
-
fixed_output_length=False # if True output.shape == (batch_size, seq_len)
|
| 186 |
-
):
|
| 187 |
-
# taking many ideas and components from HuggingFace GenerationMixin
|
| 188 |
-
# https://huggingface.co/docs/transformers/main/en/main_classes/text_generation
|
| 189 |
-
assert _has_transformers, "Please install transformers for generate functionality. `pip install transformers`."
|
| 190 |
-
assert seq_len > min_seq_len, "seq_len must be larger than min_seq_len"
|
| 191 |
-
|
| 192 |
-
with torch.no_grad():
|
| 193 |
-
sot_token_id = 49406 if sot_token_id is None else sot_token_id
|
| 194 |
-
eos_token_id = 49407 if eos_token_id is None else eos_token_id
|
| 195 |
-
pad_token_id = self.pad_id if pad_token_id is None else pad_token_id
|
| 196 |
-
logit_processor = LogitsProcessorList(
|
| 197 |
-
[
|
| 198 |
-
MinLengthLogitsProcessor(min_seq_len, eos_token_id),
|
| 199 |
-
RepetitionPenaltyLogitsProcessor(repetition_penalty),
|
| 200 |
-
]
|
| 201 |
-
)
|
| 202 |
-
|
| 203 |
-
if stopping_criteria is None:
|
| 204 |
-
stopping_criteria = [MaxLengthCriteria(max_length=seq_len)]
|
| 205 |
-
|
| 206 |
-
stopping_criteria = StoppingCriteriaList(
|
| 207 |
-
stopping_criteria
|
| 208 |
-
)
|
| 209 |
-
|
| 210 |
-
device = image.device
|
| 211 |
-
|
| 212 |
-
if generation_type == "beam_search":
|
| 213 |
-
output = self._generate_beamsearch(
|
| 214 |
-
image_inputs = image,
|
| 215 |
-
pad_token_id=pad_token_id,
|
| 216 |
-
eos_token_id=eos_token_id,
|
| 217 |
-
sot_token_id=sot_token_id,
|
| 218 |
-
num_beams=num_beams,
|
| 219 |
-
num_beam_groups=num_beam_groups,
|
| 220 |
-
min_seq_len=min_seq_len,
|
| 221 |
-
stopping_criteria=stopping_criteria,
|
| 222 |
-
logit_processor=logit_processor,
|
| 223 |
-
)
|
| 224 |
-
if fixed_output_length and output.shape[1] < seq_len:
|
| 225 |
-
return torch.cat(
|
| 226 |
-
(output, torch.ones(output.shape[0], seq_len-output.shape[1], device=device, dtype=output.dtype) * self.pad_id),
|
| 227 |
-
dim=1
|
| 228 |
-
)
|
| 229 |
-
return output
|
| 230 |
-
|
| 231 |
-
elif generation_type == "top_p":
|
| 232 |
-
logit_warper = GENERATION_TYPES[generation_type](top_p)
|
| 233 |
-
elif generation_type == "top_k":
|
| 234 |
-
logit_warper = GENERATION_TYPES[generation_type](top_k)
|
| 235 |
-
else:
|
| 236 |
-
raise ValueError(
|
| 237 |
-
f"generation_type has to be one of "
|
| 238 |
-
f"{'| ' + ' | '.join(list(GENERATION_TYPES.keys())) + ' |'}."
|
| 239 |
-
)
|
| 240 |
-
|
| 241 |
-
image_latent, image_embs = self._encode_image(image)
|
| 242 |
-
|
| 243 |
-
if text is None:
|
| 244 |
-
text = torch.ones((image.shape[0], 1), device=device, dtype=torch.long) * sot_token_id
|
| 245 |
-
|
| 246 |
-
was_training = self.training
|
| 247 |
-
num_dims = len(text.shape)
|
| 248 |
-
|
| 249 |
-
if num_dims == 1:
|
| 250 |
-
text = text[None, :]
|
| 251 |
-
|
| 252 |
-
cur_len = text.shape[1]
|
| 253 |
-
self.eval()
|
| 254 |
-
out = text
|
| 255 |
-
|
| 256 |
-
while True:
|
| 257 |
-
x = out[:, -max_seq_len:]
|
| 258 |
-
cur_len = x.shape[1]
|
| 259 |
-
logits = self(image, x, image_latent=image_latent, image_embs=image_embs, embed_cls=False)["logits"][:, -1]
|
| 260 |
-
mask = (out[:, -1] == eos_token_id) | (out[:, -1] == pad_token_id)
|
| 261 |
-
sample = torch.ones((out.shape[0], 1), device=device, dtype=torch.long) * pad_token_id
|
| 262 |
-
|
| 263 |
-
if mask.all():
|
| 264 |
-
if not fixed_output_length:
|
| 265 |
-
break
|
| 266 |
-
else:
|
| 267 |
-
logits = logits[~mask, :]
|
| 268 |
-
filtered_logits = logit_processor(x[~mask, :], logits)
|
| 269 |
-
filtered_logits = logit_warper(x[~mask, :], filtered_logits)
|
| 270 |
-
probs = F.softmax(filtered_logits / temperature, dim=-1)
|
| 271 |
-
|
| 272 |
-
if (cur_len + 1 == seq_len):
|
| 273 |
-
sample[~mask, :] = torch.ones((sum(~mask), 1), device=device, dtype=torch.long) * eos_token_id
|
| 274 |
-
else:
|
| 275 |
-
sample[~mask, :] = torch.multinomial(probs, 1)
|
| 276 |
-
|
| 277 |
-
out = torch.cat((out, sample), dim=-1)
|
| 278 |
-
|
| 279 |
-
cur_len += 1
|
| 280 |
-
|
| 281 |
-
if stopping_criteria(out, None):
|
| 282 |
-
break
|
| 283 |
-
|
| 284 |
-
if num_dims == 1:
|
| 285 |
-
out = out.squeeze(0)
|
| 286 |
-
|
| 287 |
-
self.train(was_training)
|
| 288 |
-
return out
|
| 289 |
-
|
| 290 |
-
def _generate_beamsearch(
|
| 291 |
-
self,
|
| 292 |
-
image_inputs,
|
| 293 |
-
pad_token_id=None,
|
| 294 |
-
eos_token_id=None,
|
| 295 |
-
sot_token_id=None,
|
| 296 |
-
num_beams=6,
|
| 297 |
-
num_beam_groups=3,
|
| 298 |
-
min_seq_len=5,
|
| 299 |
-
stopping_criteria=None,
|
| 300 |
-
logit_processor=None,
|
| 301 |
-
logit_warper=None,
|
| 302 |
-
):
|
| 303 |
-
device = image_inputs.device
|
| 304 |
-
batch_size = image_inputs.shape[0]
|
| 305 |
-
image_inputs = torch.repeat_interleave(image_inputs, num_beams, dim=0)
|
| 306 |
-
image_latent, image_embs = self._encode_image(image_inputs)
|
| 307 |
-
|
| 308 |
-
input_ids = torch.ones((batch_size * num_beams, 1), device=device, dtype=torch.long)
|
| 309 |
-
input_ids = input_ids * sot_token_id
|
| 310 |
-
beam_scorer = BeamSearchScorer(
|
| 311 |
-
batch_size=batch_size,
|
| 312 |
-
num_beams=num_beams,
|
| 313 |
-
device=device,
|
| 314 |
-
num_beam_groups=num_beam_groups,
|
| 315 |
-
)
|
| 316 |
-
# instantiate logits processors
|
| 317 |
-
logits_processor = (
|
| 318 |
-
LogitsProcessorList([MinLengthLogitsProcessor(min_seq_len, eos_token_id=eos_token_id)])
|
| 319 |
-
if logit_processor is None
|
| 320 |
-
else logit_processor
|
| 321 |
-
)
|
| 322 |
-
|
| 323 |
-
batch_size = len(beam_scorer._beam_hyps)
|
| 324 |
-
num_beams = beam_scorer.num_beams
|
| 325 |
-
num_beam_groups = beam_scorer.num_beam_groups
|
| 326 |
-
num_sub_beams = num_beams // num_beam_groups
|
| 327 |
-
batch_beam_size, cur_len = input_ids.shape
|
| 328 |
-
beam_indices = None
|
| 329 |
-
|
| 330 |
-
if num_beams * batch_size != batch_beam_size:
|
| 331 |
-
raise ValueError(
|
| 332 |
-
f"Batch dimension of `input_ids` should be {num_beams * batch_size}, but is {batch_beam_size}."
|
| 333 |
-
)
|
| 334 |
-
|
| 335 |
-
beam_scores = torch.full((batch_size, num_beams), -1e9, dtype=torch.float, device=device)
|
| 336 |
-
# initialise score of first beam of each group with 0 and the rest with 1e-9. This ensures that the beams in
|
| 337 |
-
# the same group don't produce same tokens everytime.
|
| 338 |
-
beam_scores[:, ::num_sub_beams] = 0
|
| 339 |
-
beam_scores = beam_scores.view((batch_size * num_beams,))
|
| 340 |
-
|
| 341 |
-
while True:
|
| 342 |
-
|
| 343 |
-
# predicted tokens in cur_len step
|
| 344 |
-
current_tokens = torch.zeros(batch_size * num_beams, dtype=input_ids.dtype, device=device)
|
| 345 |
-
|
| 346 |
-
# indices which will form the beams in the next time step
|
| 347 |
-
reordering_indices = torch.zeros(batch_size * num_beams, dtype=torch.long, device=device)
|
| 348 |
-
|
| 349 |
-
# do one decoder step on all beams of all sentences in batch
|
| 350 |
-
model_inputs = prepare_inputs_for_generation(input_ids=input_ids, image_inputs=image_inputs)
|
| 351 |
-
outputs = self(
|
| 352 |
-
model_inputs['images'],
|
| 353 |
-
model_inputs['text'],
|
| 354 |
-
embed_cls=False,
|
| 355 |
-
image_latent=image_latent,
|
| 356 |
-
image_embs=image_embs
|
| 357 |
-
)
|
| 358 |
-
|
| 359 |
-
for beam_group_idx in range(num_beam_groups):
|
| 360 |
-
group_start_idx = beam_group_idx * num_sub_beams
|
| 361 |
-
group_end_idx = min(group_start_idx + num_sub_beams, num_beams)
|
| 362 |
-
            group_size = group_end_idx - group_start_idx

            # indices of beams of current group among all sentences in batch
            batch_group_indices = []

            for batch_idx in range(batch_size):
                batch_group_indices.extend(
                    [batch_idx * num_beams + idx for idx in range(group_start_idx, group_end_idx)]
                )
            group_input_ids = input_ids[batch_group_indices]

            # select outputs of beams of current group only
            next_token_logits = outputs['logits'][batch_group_indices, -1, :]
            vocab_size = next_token_logits.shape[-1]

            next_token_scores_processed = logits_processor(
                group_input_ids, next_token_logits, current_tokens=current_tokens, beam_group_idx=beam_group_idx
            )
            next_token_scores = next_token_scores_processed + beam_scores[batch_group_indices].unsqueeze(-1)
            next_token_scores = next_token_scores.expand_as(next_token_scores_processed)

            # reshape for beam search
            next_token_scores = next_token_scores.view(batch_size, group_size * vocab_size)

            next_token_scores, next_tokens = torch.topk(
                next_token_scores, 2 * group_size, dim=1, largest=True, sorted=True
            )

            next_indices = torch.div(next_tokens, vocab_size, rounding_mode="floor")
            next_tokens = next_tokens % vocab_size

            # stateless
            process_beam_indices = sum(beam_indices, ()) if beam_indices is not None else None
            beam_outputs = beam_scorer.process(
                group_input_ids,
                next_token_scores,
                next_tokens,
                next_indices,
                pad_token_id=pad_token_id,
                eos_token_id=eos_token_id,
                beam_indices=process_beam_indices,
            )
            beam_scores[batch_group_indices] = beam_outputs["next_beam_scores"]
            beam_next_tokens = beam_outputs["next_beam_tokens"]
            beam_idx = beam_outputs["next_beam_indices"]

            input_ids[batch_group_indices] = group_input_ids[beam_idx]
            group_input_ids = torch.cat([group_input_ids[beam_idx, :], beam_next_tokens.unsqueeze(-1)], dim=-1)
            current_tokens[batch_group_indices] = group_input_ids[:, -1]

            # (beam_idx // group_size) -> batch_idx
            # (beam_idx % group_size) -> offset of idx inside the group
            reordering_indices[batch_group_indices] = (
                num_beams * torch.div(beam_idx, group_size, rounding_mode="floor") + group_start_idx + (beam_idx % group_size)
            )

        input_ids = torch.cat([input_ids, current_tokens.unsqueeze(-1)], dim=-1)

        # increase cur_len
        cur_len = cur_len + 1
        if beam_scorer.is_done or stopping_criteria(input_ids, None):
            break

    final_beam_indices = sum(beam_indices, ()) if beam_indices is not None else None
    sequence_outputs = beam_scorer.finalize(
        input_ids,
        beam_scores,
        next_tokens,
        next_indices,
        pad_token_id=pad_token_id,
        eos_token_id=eos_token_id,
        max_length=stopping_criteria.max_length,
        beam_indices=final_beam_indices,
    )
    return sequence_outputs['sequences']


def prepare_inputs_for_generation(input_ids, image_inputs, past=None, **kwargs):
    if past:
        input_ids = input_ids[:, -1].unsqueeze(-1)

    attention_mask = kwargs.get("attention_mask", None)
    position_ids = kwargs.get("position_ids", None)

    if attention_mask is not None and position_ids is None:
        # create position_ids on the fly for batch generation
        position_ids = attention_mask.long().cumsum(-1) - 1
        position_ids.masked_fill_(attention_mask == 0, 1)
    else:
        position_ids = None
    return {
        "text": input_ids,
        "images": image_inputs,
        "past_key_values": past,
        "position_ids": position_ids,
        "attention_mask": attention_mask,
    }

open_clip/src/open_clip/constants.py
DELETED
@@ -1,2 +0,0 @@
OPENAI_DATASET_MEAN = (0.48145466, 0.4578275, 0.40821073)
OPENAI_DATASET_STD = (0.26862954, 0.26130258, 0.27577711)
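
These are the RGB normalization statistics of OpenAI's CLIP training data; factory.py below falls back to them whenever a pretrained config supplies no mean/std. A minimal sketch of how they are applied, assuming torchvision is installed (open_clip's image_transform builds an equivalent Normalize step):

# Sketch: normalize a tensor image with the OpenAI CLIP statistics.
import torch
from torchvision.transforms import Normalize

normalize = Normalize(mean=OPENAI_DATASET_MEAN, std=OPENAI_DATASET_STD)
img = torch.rand(3, 224, 224)  # an RGB image already scaled to [0, 1]
img_normalized = normalize(img)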

open_clip/src/open_clip/factory.py
DELETED
@@ -1,366 +0,0 @@
import json
import logging
import os
import pathlib
import re
from copy import deepcopy
from pathlib import Path
from typing import Any, Dict, Optional, Tuple, Union

import torch

from .constants import OPENAI_DATASET_MEAN, OPENAI_DATASET_STD
from .model import CLIP, CustomTextCLIP, convert_weights_to_lp, convert_to_custom_text_state_dict,\
    resize_pos_embed, get_cast_dtype
from .coca_model import CoCa
from .loss import ClipLoss, DistillClipLoss, CoCaLoss
from .openai import load_openai_model
from .pretrained import is_pretrained_cfg, get_pretrained_cfg, download_pretrained, list_pretrained_tags_by_model, download_pretrained_from_hf
from .transform import image_transform, AugmentationCfg
from .tokenizer import HFTokenizer, tokenize


HF_HUB_PREFIX = 'hf-hub:'
_MODEL_CONFIG_PATHS = [Path(__file__).parent / f"model_configs/"]
_MODEL_CONFIGS = {}  # directory (model_name: config) of model architecture configs


def _natural_key(string_):
    return [int(s) if s.isdigit() else s for s in re.split(r'(\d+)', string_.lower())]


def _rescan_model_configs():
    global _MODEL_CONFIGS

    config_ext = ('.json',)
    config_files = []
    for config_path in _MODEL_CONFIG_PATHS:
        if config_path.is_file() and config_path.suffix in config_ext:
            config_files.append(config_path)
        elif config_path.is_dir():
            for ext in config_ext:
                config_files.extend(config_path.glob(f'*{ext}'))

    for cf in config_files:
        with open(cf, 'r') as f:
            model_cfg = json.load(f)
            if all(a in model_cfg for a in ('embed_dim', 'vision_cfg', 'text_cfg')):
                _MODEL_CONFIGS[cf.stem] = model_cfg

    _MODEL_CONFIGS = {k: v for k, v in sorted(_MODEL_CONFIGS.items(), key=lambda x: _natural_key(x[0]))}


_rescan_model_configs()  # initial populate of model config registry


def list_models():
    """ enumerate available model architectures based on config files """
    return list(_MODEL_CONFIGS.keys())


def add_model_config(path):
    """ add model config path or file and update registry """
    if not isinstance(path, Path):
        path = Path(path)
    _MODEL_CONFIG_PATHS.append(path)
    _rescan_model_configs()


def get_model_config(model_name):
    if model_name in _MODEL_CONFIGS:
        return deepcopy(_MODEL_CONFIGS[model_name])
    else:
        return None


def get_tokenizer(model_name):
    if model_name.startswith(HF_HUB_PREFIX):
        tokenizer = HFTokenizer(model_name[len(HF_HUB_PREFIX):])
    else:
        config = get_model_config(model_name)
        tokenizer = HFTokenizer(
            config['text_cfg']['hf_tokenizer_name']) if 'hf_tokenizer_name' in config['text_cfg'] else tokenize
    return tokenizer


def load_state_dict(checkpoint_path: str, map_location='cpu'):
    checkpoint = torch.load(checkpoint_path, map_location=map_location)
    if isinstance(checkpoint, dict) and 'state_dict' in checkpoint:
        state_dict = checkpoint['state_dict']
    else:
        state_dict = checkpoint
    if next(iter(state_dict.items()))[0].startswith('module'):
        state_dict = {k[7:]: v for k, v in state_dict.items()}
    return state_dict


def load_checkpoint(model, checkpoint_path, strict=True):
    state_dict = load_state_dict(checkpoint_path)
    # detect old format and make compatible with new format
    if 'positional_embedding' in state_dict and not hasattr(model, 'positional_embedding'):
        state_dict = convert_to_custom_text_state_dict(state_dict)
    resize_pos_embed(state_dict, model)
    incompatible_keys = model.load_state_dict(state_dict, strict=strict)
    return incompatible_keys


def create_model(
        model_name: str,
        pretrained: Optional[str] = None,
        precision: str = 'fp32',
        device: Union[str, torch.device] = 'cpu',
        jit: bool = False,
        force_quick_gelu: bool = False,
        force_custom_text: bool = False,
        force_patch_dropout: Optional[float] = None,
        force_image_size: Optional[Union[int, Tuple[int, int]]] = None,
        pretrained_image: bool = False,
        pretrained_hf: bool = True,
        cache_dir: Optional[str] = None,
        output_dict: Optional[bool] = None,
        require_pretrained: bool = False,
):
    has_hf_hub_prefix = model_name.startswith(HF_HUB_PREFIX)
    if has_hf_hub_prefix:
        model_id = model_name[len(HF_HUB_PREFIX):]
        checkpoint_path = download_pretrained_from_hf(model_id, cache_dir=cache_dir)
        config_path = download_pretrained_from_hf(model_id, filename='open_clip_config.json', cache_dir=cache_dir)

        with open(config_path, 'r', encoding='utf-8') as f:
            config = json.load(f)
        pretrained_cfg = config['preprocess_cfg']
        model_cfg = config['model_cfg']
    else:
        model_name = model_name.replace('/', '-')  # for callers using old naming with / in ViT names
        checkpoint_path = None
        pretrained_cfg = {}
        model_cfg = None

    if isinstance(device, str):
        device = torch.device(device)

    if pretrained and pretrained.lower() == 'openai':
        logging.info(f'Loading pretrained {model_name} from OpenAI.')
        model = load_openai_model(
            model_name,
            precision=precision,
            device=device,
            jit=jit,
            cache_dir=cache_dir,
        )

        # to always output dict even if it is clip
        if output_dict and hasattr(model, "output_dict"):
            model.output_dict = True
    else:
        model_cfg = model_cfg or get_model_config(model_name)
        if model_cfg is not None:
            logging.info(f'Loaded {model_name} model config.')
        else:
            logging.error(f'Model config for {model_name} not found; available models {list_models()}.')
            raise RuntimeError(f'Model config for {model_name} not found.')

        if force_quick_gelu:
            # override for use of QuickGELU on non-OpenAI transformer models
            model_cfg["quick_gelu"] = True

        if force_patch_dropout is not None:
            # override the default patch dropout value
            model_cfg["vision_cfg"]["patch_dropout"] = force_patch_dropout

        if force_image_size is not None:
            # override model config's image size
            model_cfg["vision_cfg"]["image_size"] = force_image_size

        if pretrained_image:
            if 'timm_model_name' in model_cfg.get('vision_cfg', {}):
                # pretrained weight loading for timm models set via vision_cfg
                model_cfg['vision_cfg']['timm_model_pretrained'] = True
            else:
                assert False, 'pretrained image towers currently only supported for timm models'

        cast_dtype = get_cast_dtype(precision)
        is_hf_model = 'hf_model_name' in model_cfg.get('text_cfg', {})
        custom_text = model_cfg.pop('custom_text', False) or force_custom_text or is_hf_model

        if custom_text:
            if is_hf_model:
                model_cfg['text_cfg']['hf_model_pretrained'] = pretrained_hf
            if "coca" in model_name:
                model = CoCa(**model_cfg, cast_dtype=cast_dtype)
            else:
                model = CustomTextCLIP(**model_cfg, cast_dtype=cast_dtype)
        else:
            model = CLIP(**model_cfg, cast_dtype=cast_dtype)

        pretrained_loaded = False
        if pretrained:
            checkpoint_path = ''
            pretrained_cfg = get_pretrained_cfg(model_name, pretrained)
            if pretrained_cfg:
                checkpoint_path = download_pretrained(pretrained_cfg, cache_dir=cache_dir)
            elif os.path.exists(pretrained):
                checkpoint_path = pretrained

            if checkpoint_path:
                logging.info(f'Loading pretrained {model_name} weights ({pretrained}).')
                load_checkpoint(model, checkpoint_path)
            else:
                error_str = (
                    f'Pretrained weights ({pretrained}) not found for model {model_name}.'
                    f'Available pretrained tags ({list_pretrained_tags_by_model(model_name)}.')
                logging.warning(error_str)
                raise RuntimeError(error_str)
            pretrained_loaded = True
        elif has_hf_hub_prefix:
            logging.info(f'Loading pretrained {model_name} weights ({pretrained}).')
            load_checkpoint(model, checkpoint_path)
            pretrained_loaded = True

        if require_pretrained and not pretrained_loaded:
            # callers of create_model_from_pretrained always expect pretrained weights
            raise RuntimeError(
                f'Pretrained weights were required for (model: {model_name}, pretrained: {pretrained}) but not loaded.')

        model.to(device=device)
        if precision in ("fp16", "bf16"):
            convert_weights_to_lp(model, dtype=torch.bfloat16 if precision == 'bf16' else torch.float16)

        # set image / mean metadata from pretrained_cfg if available, or use default
        model.visual.image_mean = pretrained_cfg.get('mean', None) or OPENAI_DATASET_MEAN
        model.visual.image_std = pretrained_cfg.get('std', None) or OPENAI_DATASET_STD

        # to always output dict even if it is clip
        if output_dict and hasattr(model, "output_dict"):
            model.output_dict = True

        if jit:
            model = torch.jit.script(model)

    return model


def create_loss(args):
    if args.distill:
        return DistillClipLoss(
            local_loss=args.local_loss,
            gather_with_grad=args.gather_with_grad,
            cache_labels=True,
            rank=args.rank,
            world_size=args.world_size,
            use_horovod=args.horovod,
        )
    elif "coca" in args.model.lower():
        return CoCaLoss(
            caption_loss_weight=args.coca_caption_loss_weight,
            clip_loss_weight=args.coca_contrastive_loss_weight,
            local_loss=args.local_loss,
            gather_with_grad=args.gather_with_grad,
            cache_labels=True,
            rank=args.rank,
            world_size=args.world_size,
            use_horovod=args.horovod,
        )
    return ClipLoss(
        local_loss=args.local_loss,
        gather_with_grad=args.gather_with_grad,
        cache_labels=True,
        rank=args.rank,
        world_size=args.world_size,
        use_horovod=args.horovod,
    )


def create_model_and_transforms(
        model_name: str,
        pretrained: Optional[str] = None,
        precision: str = 'fp32',
        device: Union[str, torch.device] = 'cpu',
        jit: bool = False,
        force_quick_gelu: bool = False,
        force_custom_text: bool = False,
        force_patch_dropout: Optional[float] = None,
        force_image_size: Optional[Union[int, Tuple[int, int]]] = None,
        pretrained_image: bool = False,
        pretrained_hf: bool = True,
        image_mean: Optional[Tuple[float, ...]] = None,
        image_std: Optional[Tuple[float, ...]] = None,
        aug_cfg: Optional[Union[Dict[str, Any], AugmentationCfg]] = None,
        cache_dir: Optional[str] = None,
        output_dict: Optional[bool] = None,
):
    model = create_model(
        model_name,
        pretrained,
        precision=precision,
        device=device,
        jit=jit,
        force_quick_gelu=force_quick_gelu,
        force_custom_text=force_custom_text,
        force_patch_dropout=force_patch_dropout,
        force_image_size=force_image_size,
        pretrained_image=pretrained_image,
        pretrained_hf=pretrained_hf,
        cache_dir=cache_dir,
        output_dict=output_dict,
    )

    image_mean = image_mean or getattr(model.visual, 'image_mean', None)
    image_std = image_std or getattr(model.visual, 'image_std', None)
    preprocess_train = image_transform(
        model.visual.image_size,
        is_train=True,
        mean=image_mean,
        std=image_std,
        aug_cfg=aug_cfg,
    )
    preprocess_val = image_transform(
        model.visual.image_size,
        is_train=False,
        mean=image_mean,
        std=image_std,
    )

    return model, preprocess_train, preprocess_val


def create_model_from_pretrained(
        model_name: str,
        pretrained: Optional[str] = None,
        precision: str = 'fp32',
        device: Union[str, torch.device] = 'cpu',
        jit: bool = False,
        force_quick_gelu: bool = False,
        force_custom_text: bool = False,
        force_image_size: Optional[Union[int, Tuple[int, int]]] = None,
        return_transform: bool = True,
        image_mean: Optional[Tuple[float, ...]] = None,
        image_std: Optional[Tuple[float, ...]] = None,
        cache_dir: Optional[str] = None,
):
    model = create_model(
        model_name,
        pretrained,
        precision=precision,
        device=device,
        jit=jit,
        force_quick_gelu=force_quick_gelu,
        force_custom_text=force_custom_text,
        force_image_size=force_image_size,
        cache_dir=cache_dir,
        require_pretrained=True,
    )

    if not return_transform:
        return model

    image_mean = image_mean or getattr(model.visual, 'image_mean', None)
    image_std = image_std or getattr(model.visual, 'image_std', None)
    preprocess = image_transform(
        model.visual.image_size,
        is_train=False,
        mean=image_mean,
        std=image_std,
    )

    return model, preprocess
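
Together these factory functions are the standard entry point into the library. A minimal usage sketch: 'laion2b_s34b_b79k' is one published ViT-B-32 pretrained tag (any tag from list_pretrained_tags_by_model works), and "cat.png" is a placeholder path.

import torch
from PIL import Image
import open_clip

model, preprocess_train, preprocess_val = open_clip.create_model_and_transforms(
    'ViT-B-32', pretrained='laion2b_s34b_b79k')
tokenizer = open_clip.get_tokenizer('ViT-B-32')

image = preprocess_val(Image.open("cat.png")).unsqueeze(0)  # placeholder image path
text = tokenizer(["a photo of a cat", "a photo of a dog"])

with torch.no_grad():
    image_features = model.encode_image(image)
    text_features = model.encode_text(text)
    image_features /= image_features.norm(dim=-1, keepdim=True)
    text_features /= text_features.norm(dim=-1, keepdim=True)
    text_probs = (100.0 * image_features @ text_features.T).softmax(dim=-1)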

open_clip/src/open_clip/generation_utils.py
DELETED
File without changes

open_clip/src/open_clip/hf_configs.py
DELETED
@@ -1,56 +0,0 @@
# HF architecture dict:
arch_dict = {
    # https://huggingface.co/docs/transformers/model_doc/roberta#roberta
    "roberta": {
        "config_names": {
            "context_length": "max_position_embeddings",
            "vocab_size": "vocab_size",
            "width": "hidden_size",
            "heads": "num_attention_heads",
            "layers": "num_hidden_layers",
            "layer_attr": "layer",
            "token_embeddings_attr": "embeddings"
        },
        "pooler": "mean_pooler",
    },
    # https://huggingface.co/docs/transformers/model_doc/xlm-roberta#transformers.XLMRobertaConfig
    "xlm-roberta": {
        "config_names": {
            "context_length": "max_position_embeddings",
            "vocab_size": "vocab_size",
            "width": "hidden_size",
            "heads": "num_attention_heads",
            "layers": "num_hidden_layers",
            "layer_attr": "layer",
            "token_embeddings_attr": "embeddings"
        },
        "pooler": "mean_pooler",
    },
    # https://huggingface.co/docs/transformers/model_doc/mt5#mt5
    "mt5": {
        "config_names": {
            # unlimited seqlen
            # https://github.com/google-research/text-to-text-transfer-transformer/issues/273
            # https://github.com/huggingface/transformers/blob/v4.24.0/src/transformers/models/t5/modeling_t5.py#L374
            "context_length": "",
            "vocab_size": "vocab_size",
            "width": "d_model",
            "heads": "num_heads",
            "layers": "num_layers",
            "layer_attr": "block",
            "token_embeddings_attr": "embed_tokens"
        },
        "pooler": "mean_pooler",
    },
    # https://huggingface.co/docs/transformers/model_doc/bert
    "bert": {
        "config_names": {
            "context_length": "max_position_embeddings",
            "vocab_size": "vocab_size",
            "width": "hidden_size",
            "heads": "num_attention_heads",
            "layers": "num_hidden_layers",
        },
        "pooler": "cls_pooler",
    },
}
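
A short sketch of how these mappings are consumed; HFTextEncoder in hf_model.py below performs the same lookup. This assumes the transformers package is installed and the 'roberta-base' config is fetchable:

# Sketch: resolve architecture dimensions of an HF text model via arch_dict.
from transformers import AutoConfig

config = AutoConfig.from_pretrained('roberta-base')
names = arch_dict[config.model_type]["config_names"]
width = getattr(config, names["width"])    # config.hidden_size -> 768
layers = getattr(config, names["layers"])  # config.num_hidden_layers -> 12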

open_clip/src/open_clip/hf_model.py
DELETED
@@ -1,193 +0,0 @@
""" huggingface model adapter

Wraps HuggingFace transformers (https://github.com/huggingface/transformers) models for use as a text tower in CLIP model.
"""
import re

import torch
import torch.nn as nn
from torch import TensorType

try:
    import transformers
    from transformers import AutoModel, AutoTokenizer, AutoConfig, PretrainedConfig
    from transformers.modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, \
        BaseModelOutputWithPoolingAndCrossAttentions
except ImportError as e:
    transformers = None


    class BaseModelOutput:
        pass


    class PretrainedConfig:
        pass

from .hf_configs import arch_dict


# utils
def _camel2snake(s):
    return re.sub(r'(?<!^)(?=[A-Z])', '_', s).lower()


# TODO: ?last - for gpt-like models
_POOLERS = {}


def register_pooler(cls):
    """Decorator registering pooler class"""
    _POOLERS[_camel2snake(cls.__name__)] = cls
    return cls


@register_pooler
class MeanPooler(nn.Module):
    """Mean pooling"""

    def forward(self, x: BaseModelOutput, attention_mask: TensorType):
        masked_output = x.last_hidden_state * attention_mask.unsqueeze(-1)
        return masked_output.sum(dim=1) / attention_mask.sum(-1, keepdim=True)


@register_pooler
class MaxPooler(nn.Module):
    """Max pooling"""

    def forward(self, x: BaseModelOutput, attention_mask: TensorType):
        masked_output = x.last_hidden_state.masked_fill(attention_mask.unsqueeze(-1), -torch.inf)
        return masked_output.max(1).values


@register_pooler
class ClsPooler(nn.Module):
    """CLS token pooling"""

    def __init__(self, use_pooler_output=True):
        super().__init__()
        self.cls_token_position = 0
        self.use_pooler_output = use_pooler_output

    def forward(self, x: BaseModelOutput, attention_mask: TensorType):
        if (self.use_pooler_output and
            isinstance(x, (BaseModelOutputWithPooling, BaseModelOutputWithPoolingAndCrossAttentions)) and
            (x.pooler_output is not None)
        ):
            return x.pooler_output

        return x.last_hidden_state[:, self.cls_token_position, :]


@register_pooler
class ClsLastHiddenStatePooler(nn.Module):
    """CLS token pooling
    NOTE: this is equivalent to ClsPooler above with use_pooler_output=False
    """

    def __init__(self):
        super().__init__()
        self.cls_token_position = 0

    def forward(self, x: BaseModelOutput, attention_mask: TensorType):
        return x.last_hidden_state[:, self.cls_token_position, :]


class HFTextEncoder(nn.Module):
    """HuggingFace model adapter"""
    output_tokens: torch.jit.Final[bool]

    def __init__(
            self,
            model_name_or_path: str,
            output_dim: int,
            config: PretrainedConfig = None,
            pooler_type: str = None,
            proj: str = None,
            pretrained: bool = True,
            output_tokens: bool = False,
    ):
        super().__init__()
        self.output_tokens = output_tokens
        self.output_dim = output_dim

        # TODO: find better way to get this information
        uses_transformer_pooler = (pooler_type == "cls_pooler")

        if transformers is None:
            raise RuntimeError("Please `pip install transformers` to use pre-trained HuggingFace models")
        if config is None:
            self.config = AutoConfig.from_pretrained(model_name_or_path)
            create_func, model_args = (AutoModel.from_pretrained, model_name_or_path) if pretrained else (
                AutoModel.from_config, self.config)
            # TODO: do all model configs have this attribute? PretrainedConfig does so yes??
            if hasattr(self.config, "is_encoder_decoder") and self.config.is_encoder_decoder:
                self.transformer = create_func(model_args)
                self.transformer = self.transformer.encoder
            else:
                self.transformer = create_func(model_args, add_pooling_layer=uses_transformer_pooler)
        else:
            self.config = config
            self.transformer = AutoModel.from_config(config)
        if pooler_type is None:  # get default arch pooler
            pooler_type = (arch_dict[self.config.model_type]["pooler"])

        # FIXME downstream users of OpenCLIP models use these attr, need to verify valid across all models
        self.vocab_size = getattr(self.config, 'vocab_size', 0)
        self.context_length = getattr(self.config, 'max_position_embeddings', 0)

        self.pooler = _POOLERS[pooler_type]()

        d_model = getattr(self.config, arch_dict[self.config.model_type]["config_names"]["width"])
        if (d_model == output_dim) and (proj is None):  # do we always need a proj?
            self.proj = nn.Identity()
        elif proj == 'linear':
            self.proj = nn.Linear(d_model, output_dim, bias=False)
        elif proj == 'mlp':
            hidden_size = (d_model + output_dim) // 2
            self.proj = nn.Sequential(
                nn.Linear(d_model, hidden_size, bias=False),
                nn.GELU(),
                nn.Linear(hidden_size, output_dim, bias=False),
            )

    def forward(self, x: TensorType):
        attn_mask = (x != self.config.pad_token_id).long()
        out = self.transformer(input_ids=x, attention_mask=attn_mask)
        pooled_out = self.pooler(out, attn_mask)
        projected = self.proj(pooled_out)

        seq_len = out.last_hidden_state.shape[1]
        tokens = (
            out.last_hidden_state[:, torch.arange(seq_len) != self.pooler.cls_token_position, :]
            if type(self.pooler) == ClsPooler
            else out.last_hidden_state
        )

        if self.output_tokens:
            return projected, tokens
        return projected

    def lock(self, unlocked_layers: int = 0, freeze_layer_norm: bool = True):
        if not unlocked_layers:  # full freezing
            for n, p in self.transformer.named_parameters():
                p.requires_grad = (not freeze_layer_norm) if "LayerNorm" in n.split(".") else False
            return

        encoder = self.transformer.encoder if hasattr(self.transformer, 'encoder') else self.transformer
        layer_list = getattr(encoder, arch_dict[self.config.model_type]["config_names"]["layer_attr"])
        print(f"Unlocking {unlocked_layers}/{len(layer_list) + 1} layers of hf model")
        embeddings = getattr(
            self.transformer, arch_dict[self.config.model_type]["config_names"]["token_embeddings_attr"])
        modules = [embeddings, *layer_list][:-unlocked_layers]
        # freeze layers
        for module in modules:
            for n, p in module.named_parameters():
                p.requires_grad = (not freeze_layer_norm) if "LayerNorm" in n.split(".") else False

    @torch.jit.ignore
    def set_grad_checkpointing(self, enable=True):
        self.transformer.gradient_checkpointing_enable()

    def init_parameters(self):
        pass
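
The _POOLERS registry makes alternative pooling strategies pluggable. A hypothetical example (LastTokenPooler is not part of open_clip); the decorator registers it under the snake_case name 'last_token_pooler':

# Hypothetical pooler built on the register_pooler decorator above.
@register_pooler
class LastTokenPooler(nn.Module):
    """Pool by taking the hidden state of the last non-padded token."""

    def forward(self, x: BaseModelOutput, attention_mask: TensorType):
        # index of the last attended token in each sequence
        last_idx = attention_mask.sum(dim=1) - 1
        batch_idx = torch.arange(x.last_hidden_state.shape[0], device=last_idx.device)
        return x.last_hidden_state[batch_idx, last_idx, :]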

open_clip/src/open_clip/loss.py
DELETED
@@ -1,212 +0,0 @@
import torch
import torch.nn as nn
from torch.nn import functional as F

try:
    import torch.distributed.nn
    from torch import distributed as dist

    has_distributed = True
except ImportError:
    has_distributed = False

try:
    import horovod.torch as hvd
except ImportError:
    hvd = None


def gather_features(
        image_features,
        text_features,
        local_loss=False,
        gather_with_grad=False,
        rank=0,
        world_size=1,
        use_horovod=False
):
    assert has_distributed, 'torch.distributed did not import correctly, please use a PyTorch version with support.'
    if use_horovod:
        assert hvd is not None, 'Please install horovod'
        if gather_with_grad:
            all_image_features = hvd.allgather(image_features)
            all_text_features = hvd.allgather(text_features)
        else:
            with torch.no_grad():
                all_image_features = hvd.allgather(image_features)
                all_text_features = hvd.allgather(text_features)
            if not local_loss:
                # ensure grads for local rank when all_* features don't have a gradient
                gathered_image_features = list(all_image_features.chunk(world_size, dim=0))
                gathered_text_features = list(all_text_features.chunk(world_size, dim=0))
                gathered_image_features[rank] = image_features
                gathered_text_features[rank] = text_features
                all_image_features = torch.cat(gathered_image_features, dim=0)
                all_text_features = torch.cat(gathered_text_features, dim=0)
    else:
        # We gather tensors from all gpus
        if gather_with_grad:
            all_image_features = torch.cat(torch.distributed.nn.all_gather(image_features), dim=0)
            all_text_features = torch.cat(torch.distributed.nn.all_gather(text_features), dim=0)
        else:
            gathered_image_features = [torch.zeros_like(image_features) for _ in range(world_size)]
            gathered_text_features = [torch.zeros_like(text_features) for _ in range(world_size)]
            dist.all_gather(gathered_image_features, image_features)
            dist.all_gather(gathered_text_features, text_features)
            if not local_loss:
                # ensure grads for local rank when all_* features don't have a gradient
                gathered_image_features[rank] = image_features
                gathered_text_features[rank] = text_features
            all_image_features = torch.cat(gathered_image_features, dim=0)
            all_text_features = torch.cat(gathered_text_features, dim=0)

    return all_image_features, all_text_features


class ClipLoss(nn.Module):

    def __init__(
            self,
            local_loss=False,
            gather_with_grad=False,
            cache_labels=False,
            rank=0,
            world_size=1,
            use_horovod=False,
    ):
        super().__init__()
        self.local_loss = local_loss
        self.gather_with_grad = gather_with_grad
        self.cache_labels = cache_labels
        self.rank = rank
        self.world_size = world_size
        self.use_horovod = use_horovod

        # cache state
        self.prev_num_logits = 0
        self.labels = {}

    def get_ground_truth(self, device, num_logits) -> torch.Tensor:
        # calculated ground-truth and cache if enabled
        if self.prev_num_logits != num_logits or device not in self.labels:
            labels = torch.arange(num_logits, device=device, dtype=torch.long)
            if self.world_size > 1 and self.local_loss:
                labels = labels + num_logits * self.rank
            if self.cache_labels:
                self.labels[device] = labels
                self.prev_num_logits = num_logits
        else:
            labels = self.labels[device]
        return labels

    def get_logits(self, image_features, text_features, logit_scale):
        if self.world_size > 1:
            all_image_features, all_text_features = gather_features(
                image_features, text_features,
                self.local_loss, self.gather_with_grad, self.rank, self.world_size, self.use_horovod)

            if self.local_loss:
                logits_per_image = logit_scale * image_features @ all_text_features.T
                logits_per_text = logit_scale * text_features @ all_image_features.T
            else:
                logits_per_image = logit_scale * all_image_features @ all_text_features.T
                logits_per_text = logits_per_image.T
        else:
            logits_per_image = logit_scale * image_features @ text_features.T
            logits_per_text = logit_scale * text_features @ image_features.T

        return logits_per_image, logits_per_text

    def forward(self, image_features, text_features, logit_scale, output_dict=False):
        device = image_features.device
        logits_per_image, logits_per_text = self.get_logits(image_features, text_features, logit_scale)

        labels = self.get_ground_truth(device, logits_per_image.shape[0])

        total_loss = (
            F.cross_entropy(logits_per_image, labels) +
            F.cross_entropy(logits_per_text, labels)
        ) / 2

        return {"contrastive_loss": total_loss} if output_dict else total_loss


class CoCaLoss(ClipLoss):
    def __init__(
            self,
            caption_loss_weight,
            clip_loss_weight,
            pad_id=0,  # pad_token for open_clip custom tokenizer
            local_loss=False,
            gather_with_grad=False,
            cache_labels=False,
            rank=0,
            world_size=1,
            use_horovod=False,
    ):
        super().__init__(
            local_loss=local_loss,
            gather_with_grad=gather_with_grad,
            cache_labels=cache_labels,
            rank=rank,
            world_size=world_size,
            use_horovod=use_horovod
        )

        self.clip_loss_weight = clip_loss_weight
        self.caption_loss_weight = caption_loss_weight
        self.caption_loss = nn.CrossEntropyLoss(ignore_index=pad_id)

    def forward(self, image_features, text_features, logits, labels, logit_scale, output_dict=False):
        clip_loss = super().forward(image_features, text_features, logit_scale)
        clip_loss = self.clip_loss_weight * clip_loss

        caption_loss = self.caption_loss(
            logits.permute(0, 2, 1),
            labels,
        )
        caption_loss = caption_loss * self.caption_loss_weight

        if output_dict:
            return {"contrastive_loss": clip_loss, "caption_loss": caption_loss}

        return clip_loss, caption_loss


class DistillClipLoss(ClipLoss):

    def dist_loss(self, teacher_logits, student_logits):
        return -(teacher_logits.softmax(dim=1) * student_logits.log_softmax(dim=1)).sum(dim=1).mean(dim=0)

    def forward(
            self,
            image_features,
            text_features,
            logit_scale,
            dist_image_features,
            dist_text_features,
            dist_logit_scale,
            output_dict=False,
    ):
        logits_per_image, logits_per_text = \
            self.get_logits(image_features, text_features, logit_scale)

        dist_logits_per_image, dist_logits_per_text = \
            self.get_logits(dist_image_features, dist_text_features, dist_logit_scale)

        labels = self.get_ground_truth(image_features.device, logits_per_image.shape[0])

        contrastive_loss = (
            F.cross_entropy(logits_per_image, labels) +
            F.cross_entropy(logits_per_text, labels)
        ) / 2

        distill_loss = (
            self.dist_loss(dist_logits_per_image, logits_per_image) +
            self.dist_loss(dist_logits_per_text, logits_per_text)
        ) / 2

        if output_dict:
            return {"contrastive_loss": contrastive_loss, "distill_loss": distill_loss}

        return contrastive_loss, distill_loss
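
A minimal single-process sketch of ClipLoss (the world_size=1 path above), with random unit-normalized features standing in for encoder outputs:

# Sketch: ClipLoss on one process, no distributed gathering involved.
loss_fn = ClipLoss()
image_features = F.normalize(torch.randn(8, 512), dim=-1)
text_features = F.normalize(torch.randn(8, 512), dim=-1)
logit_scale = 1 / 0.07  # exp of the model's initial log logit_scale parameter
loss = loss_fn(image_features, text_features, logit_scale)
print(loss.item())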

open_clip/src/open_clip/model.py
DELETED
@@ -1,448 +0,0 @@
""" CLIP Model

Adapted from https://github.com/openai/CLIP. Originally MIT License, Copyright (c) 2021 OpenAI.
"""
from dataclasses import dataclass
import logging
import math
from typing import Optional, Tuple, Union

import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
from torch.utils.checkpoint import checkpoint

from .hf_model import HFTextEncoder
from .modified_resnet import ModifiedResNet
from .timm_model import TimmModel
from .transformer import LayerNormFp32, LayerNorm, QuickGELU, Attention, VisionTransformer, TextTransformer
from .utils import to_2tuple


@dataclass
class CLIPVisionCfg:
    layers: Union[Tuple[int, int, int, int], int] = 12
    width: int = 768
    head_width: int = 64
    mlp_ratio: float = 4.0
    patch_size: int = 16
    image_size: Union[Tuple[int, int], int] = 224
    ls_init_value: Optional[float] = None  # layer scale initial value
    patch_dropout: float = 0.  # what fraction of patches to dropout during training (0 would mean disabled and no patches dropped) - 0.5 to 0.75 recommended in the paper for optimal results
    input_patchnorm: bool = False  # whether to use dual patchnorm - would only apply the input layernorm on each patch, as post-layernorm already exist in original clip vit design
    global_average_pool: bool = False  # whether to global average pool the last embedding layer, instead of using CLS token (https://arxiv.org/abs/2205.01580)
    attentional_pool: bool = False  # whether to use attentional pooler in the last embedding layer
    n_queries: int = 256  # n_queries for attentional pooler
    attn_pooler_heads: int = 8  # n heads for attentional_pooling
    timm_model_name: str = None  # a valid model name overrides layers, width, patch_size
    timm_model_pretrained: bool = False  # use (imagenet) pretrained weights for named model
    timm_pool: str = 'avg'  # feature pooling for timm model ('abs_attn', 'rot_attn', 'avg', '')
    timm_proj: str = 'linear'  # linear projection for timm model output ('linear', 'mlp', '')
    timm_proj_bias: bool = False  # enable bias final projection
    timm_drop: float = 0.  # head dropout
    timm_drop_path: Optional[float] = None  # backbone stochastic depth
    output_tokens: bool = False


@dataclass
class CLIPTextCfg:
    context_length: int = 77
    vocab_size: int = 49408
    width: int = 512
    heads: int = 8
    layers: int = 12
    ls_init_value: Optional[float] = None  # layer scale initial value
    hf_model_name: str = None
    hf_tokenizer_name: str = None
    hf_model_pretrained: bool = True
    proj: str = 'mlp'
    pooler_type: str = 'mean_pooler'
    embed_cls: bool = False
    pad_id: int = 0
    output_tokens: bool = False


def get_cast_dtype(precision: str):
    cast_dtype = None
    if precision == 'bf16':
        cast_dtype = torch.bfloat16
    elif precision == 'fp16':
        cast_dtype = torch.float16
    return cast_dtype


def _build_vision_tower(
        embed_dim: int,
        vision_cfg: CLIPVisionCfg,
        quick_gelu: bool = False,
        cast_dtype: Optional[torch.dtype] = None
):
    if isinstance(vision_cfg, dict):
        vision_cfg = CLIPVisionCfg(**vision_cfg)

    # OpenAI models are pretrained w/ QuickGELU but native nn.GELU is both faster and more
    # memory efficient in recent PyTorch releases (>= 1.10).
    # NOTE: timm models always use native GELU regardless of quick_gelu flag.
    act_layer = QuickGELU if quick_gelu else nn.GELU

    if vision_cfg.timm_model_name:
        visual = TimmModel(
            vision_cfg.timm_model_name,
            pretrained=vision_cfg.timm_model_pretrained,
            pool=vision_cfg.timm_pool,
            proj=vision_cfg.timm_proj,
            proj_bias=vision_cfg.timm_proj_bias,
            drop=vision_cfg.timm_drop,
            drop_path=vision_cfg.timm_drop_path,
            embed_dim=embed_dim,
            image_size=vision_cfg.image_size,
        )
        act_layer = nn.GELU  # so that text transformer doesn't use QuickGELU w/ timm models
    elif isinstance(vision_cfg.layers, (tuple, list)):
        vision_heads = vision_cfg.width * 32 // vision_cfg.head_width
        visual = ModifiedResNet(
            layers=vision_cfg.layers,
            output_dim=embed_dim,
            heads=vision_heads,
            image_size=vision_cfg.image_size,
            width=vision_cfg.width,
        )
    else:
        vision_heads = vision_cfg.width // vision_cfg.head_width
        norm_layer = LayerNormFp32 if cast_dtype in (torch.float16, torch.bfloat16) else LayerNorm
        visual = VisionTransformer(
            image_size=vision_cfg.image_size,
            patch_size=vision_cfg.patch_size,
            width=vision_cfg.width,
            layers=vision_cfg.layers,
            heads=vision_heads,
            mlp_ratio=vision_cfg.mlp_ratio,
            ls_init_value=vision_cfg.ls_init_value,
            patch_dropout=vision_cfg.patch_dropout,
            input_patchnorm=vision_cfg.input_patchnorm,
            global_average_pool=vision_cfg.global_average_pool,
            attentional_pool=vision_cfg.attentional_pool,
            n_queries=vision_cfg.n_queries,
            attn_pooler_heads=vision_cfg.attn_pooler_heads,
            output_tokens=vision_cfg.output_tokens,
            output_dim=embed_dim,
            act_layer=act_layer,
            norm_layer=norm_layer,
        )

    return visual


def _build_text_tower(
        embed_dim: int,
        text_cfg: CLIPTextCfg,
        quick_gelu: bool = False,
        cast_dtype: Optional[torch.dtype] = None,
):
    if isinstance(text_cfg, dict):
        text_cfg = CLIPTextCfg(**text_cfg)

    if text_cfg.hf_model_name:
        text = HFTextEncoder(
            text_cfg.hf_model_name,
            output_dim=embed_dim,
            proj=text_cfg.proj,
            pooler_type=text_cfg.pooler_type,
            pretrained=text_cfg.hf_model_pretrained,
            output_tokens=text_cfg.output_tokens,
        )
    else:
        act_layer = QuickGELU if quick_gelu else nn.GELU
        norm_layer = LayerNormFp32 if cast_dtype in (torch.float16, torch.bfloat16) else LayerNorm

        text = TextTransformer(
            context_length=text_cfg.context_length,
            vocab_size=text_cfg.vocab_size,
            width=text_cfg.width,
            heads=text_cfg.heads,
            layers=text_cfg.layers,
            ls_init_value=text_cfg.ls_init_value,
            output_dim=embed_dim,
            embed_cls=text_cfg.embed_cls,
            output_tokens=text_cfg.output_tokens,
            pad_id=text_cfg.pad_id,
            act_layer=act_layer,
            norm_layer=norm_layer,
        )
    return text


class CLIP(nn.Module):
    output_dict: torch.jit.Final[bool]

    def __init__(
            self,
            embed_dim: int,
            vision_cfg: CLIPVisionCfg,
            text_cfg: CLIPTextCfg,
            quick_gelu: bool = False,
            cast_dtype: Optional[torch.dtype] = None,
            output_dict: bool = False,
    ):
        super().__init__()
        self.output_dict = output_dict
        self.visual = _build_vision_tower(embed_dim, vision_cfg, quick_gelu, cast_dtype)

        text = _build_text_tower(embed_dim, text_cfg, quick_gelu, cast_dtype)
        self.transformer = text.transformer
        self.context_length = text.context_length
        self.vocab_size = text.vocab_size
        self.token_embedding = text.token_embedding
        self.positional_embedding = text.positional_embedding
        self.ln_final = text.ln_final
        self.text_projection = text.text_projection
        self.register_buffer('attn_mask', text.attn_mask, persistent=False)

        self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07))

    def lock_image_tower(self, unlocked_groups=0, freeze_bn_stats=False):
        # lock image tower as per LiT - https://arxiv.org/abs/2111.07991
        self.visual.lock(unlocked_groups=unlocked_groups, freeze_bn_stats=freeze_bn_stats)

    @torch.jit.ignore
    def set_grad_checkpointing(self, enable=True):
        self.visual.set_grad_checkpointing(enable)
        self.transformer.grad_checkpointing = enable

    def encode_image(self, image, normalize: bool = False):
        features = self.visual(image)
        return F.normalize(features, dim=-1) if normalize else features

    def encode_text(self, text, normalize: bool = False):
        cast_dtype = self.transformer.get_cast_dtype()

        x = self.token_embedding(text).to(cast_dtype)  # [batch_size, n_ctx, d_model]

        x = x + self.positional_embedding.to(cast_dtype)
        x = x.permute(1, 0, 2)  # NLD -> LND
        x = self.transformer(x, attn_mask=self.attn_mask)
        x = x.permute(1, 0, 2)  # LND -> NLD
        x = self.ln_final(x)  # [batch_size, n_ctx, transformer.width]
        # take features from the eot embedding (eot_token is the highest number in each sequence)
        x = x[torch.arange(x.shape[0]), text.argmax(dim=-1)] @ self.text_projection
        return F.normalize(x, dim=-1) if normalize else x

    def forward(self, image, text):
        image_features = self.encode_image(image, normalize=True)
        text_features = self.encode_text(text, normalize=True)
        if self.output_dict:
            return {
                "image_features": image_features,
                "text_features": text_features,
                "logit_scale": self.logit_scale.exp()
            }
        return image_features, text_features, self.logit_scale.exp()
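
An aside before the file continues: a minimal sketch constructing a deliberately tiny CLIP from the dataclasses above, assuming the sibling modules it imports (transformer.py and friends) are present; the sizes are arbitrary and chosen only to keep the example cheap.

# Sketch: tiny CLIP for shape-checking; sizes are illustrative, not a real config.
tiny_vision = CLIPVisionCfg(layers=2, width=64, head_width=32, patch_size=16, image_size=32)
tiny_text = CLIPTextCfg(context_length=16, vocab_size=1000, width=64, heads=4, layers=2)
clip = CLIP(embed_dim=64, vision_cfg=tiny_vision, text_cfg=tiny_text)

images = torch.randn(2, 3, 32, 32)
texts = torch.randint(0, 1000, (2, 16))
image_features, text_features, logit_scale = clip(images, texts)  # tuple: output_dict defaults to False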
|
| 241 |
-
|
| 242 |
-
|
| 243 |
-
class CustomTextCLIP(nn.Module):
|
| 244 |
-
output_dict: torch.jit.Final[bool]
|
| 245 |
-
|
| 246 |
-
def __init__(
|
| 247 |
-
self,
|
| 248 |
-
embed_dim: int,
|
| 249 |
-
vision_cfg: CLIPVisionCfg,
|
| 250 |
-
text_cfg: CLIPTextCfg,
|
| 251 |
-
quick_gelu: bool = False,
|
| 252 |
-
cast_dtype: Optional[torch.dtype] = None,
|
| 253 |
-
output_dict: bool = False,
|
| 254 |
-
):
|
| 255 |
-
super().__init__()
|
| 256 |
-
self.output_dict = output_dict
|
| 257 |
-
self.visual = _build_vision_tower(embed_dim, vision_cfg, quick_gelu, cast_dtype)
|
| 258 |
-
self.text = _build_text_tower(embed_dim, text_cfg, quick_gelu, cast_dtype)
|
| 259 |
-
self.context_length = self.text.context_length
|
| 260 |
-
self.vocab_size = self.text.vocab_size
|
| 261 |
-
self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07))
|
| 262 |
-
|
| 263 |
-
def lock_image_tower(self, unlocked_groups=0, freeze_bn_stats=False):
|
| 264 |
-
# lock image tower as per LiT - https://arxiv.org/abs/2111.07991
|
| 265 |
-
self.visual.lock(unlocked_groups=unlocked_groups, freeze_bn_stats=freeze_bn_stats)
|
| 266 |
-
|
| 267 |
-
def lock_text_tower(self, unlocked_layers: int = 0, freeze_layer_norm: bool = True):
|
| 268 |
-
self.text.lock(unlocked_layers, freeze_layer_norm)
|
| 269 |
-
|
| 270 |
-
@torch.jit.ignore
|
| 271 |
-
def set_grad_checkpointing(self, enable=True):
|
| 272 |
-
self.visual.set_grad_checkpointing(enable)
|
| 273 |
-
self.text.set_grad_checkpointing(enable)
|
| 274 |
-
|
| 275 |
-
def encode_image(self, image, normalize: bool = False):
|
| 276 |
-
features = self.visual(image)
|
| 277 |
-
return F.normalize(features, dim=-1) if normalize else features
|
| 278 |
-
|
| 279 |
-
def encode_text(self, text, normalize: bool = False):
|
| 280 |
-
features = self.text(text)
|
| 281 |
-
return F.normalize(features, dim=-1) if normalize else features
|
| 282 |
-
|
| 283 |
-
def forward(self, image, text):
|
| 284 |
-
image_features = self.encode_image(image, normalize=True)
|
| 285 |
-
text_features = self.encode_text(text, normalize=True)
|
| 286 |
-
if self.output_dict:
|
| 287 |
-
return {
|
| 288 |
-
"image_features": image_features,
|
| 289 |
-
"text_features": text_features,
|
| 290 |
-
"logit_scale": self.logit_scale.exp()
|
| 291 |
-
}
|
| 292 |
-
return image_features, text_features, self.logit_scale.exp()


def convert_weights_to_lp(model: nn.Module, dtype=torch.float16):
    """Convert applicable model parameters to low-precision (bf16 or fp16)"""

    def _convert_weights(l):
        if isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Linear)):
            l.weight.data = l.weight.data.to(dtype)
            if l.bias is not None:
                l.bias.data = l.bias.data.to(dtype)

        if isinstance(l, (nn.MultiheadAttention, Attention)):
            for attr in [*[f"{s}_proj_weight" for s in ["in", "q", "k", "v"]], "in_proj_bias", "bias_k", "bias_v"]:
                tensor = getattr(l, attr)
                if tensor is not None:
                    tensor.data = tensor.data.to(dtype)

        for name in ["text_projection", "proj"]:
            if hasattr(l, name):
                attr = getattr(l, name)
                if attr is not None:
                    attr.data = attr.data.to(dtype)

    model.apply(_convert_weights)


convert_weights_to_fp16 = convert_weights_to_lp  # backwards compat
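Because _convert_weights only casts the layer types it recognizes, norm layers and logit_scale stay in fp32, unlike a blanket model.half(). A minimal sketch, with `model` as an assumed placeholder:

import torch

convert_weights_to_lp(model, dtype=torch.bfloat16)  # `model` is assumed to exist
# Matmul-heavy layers are now bf16; LayerNorms and logit_scale remain fp32.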


# used to maintain checkpoint compatibility
def convert_to_custom_text_state_dict(state_dict: dict):
    if 'text_projection' in state_dict:
        # old format state_dict, move text tower -> .text
        new_state_dict = {}
        for k, v in state_dict.items():
            if any(k.startswith(p) for p in (
                'text_projection',
                'positional_embedding',
                'token_embedding',
                'transformer',
                'ln_final',
            )):
                k = 'text.' + k
            new_state_dict[k] = v
        return new_state_dict
    return state_dict
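A toy illustration of the remapping with hypothetical keys; the presence of 'text_projection' is what marks a state dict as old-format:

old_sd = {
    "text_projection": None,      # marker: old single-tower layout
    "ln_final.weight": None,      # text-tower key, gets the 'text.' prefix
    "visual.conv1.weight": None,  # visual key, left untouched
}
new_sd = convert_to_custom_text_state_dict(old_sd)
assert "text.ln_final.weight" in new_sd
assert "visual.conv1.weight" in new_sd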


def build_model_from_openai_state_dict(
        state_dict: dict,
        quick_gelu=True,
        cast_dtype=torch.float16,
):
    vit = "visual.proj" in state_dict

    if vit:
        vision_width = state_dict["visual.conv1.weight"].shape[0]
        vision_layers = len(
            [k for k in state_dict.keys() if k.startswith("visual.") and k.endswith(".attn.in_proj_weight")])
        vision_patch_size = state_dict["visual.conv1.weight"].shape[-1]
        grid_size = round((state_dict["visual.positional_embedding"].shape[0] - 1) ** 0.5)
        image_size = vision_patch_size * grid_size
    else:
        counts: list = [
            len(set(k.split(".")[2] for k in state_dict if k.startswith(f"visual.layer{b}"))) for b in [1, 2, 3, 4]]
        vision_layers = tuple(counts)
        vision_width = state_dict["visual.layer1.0.conv1.weight"].shape[0]
        output_width = round((state_dict["visual.attnpool.positional_embedding"].shape[0] - 1) ** 0.5)
        vision_patch_size = None
        assert output_width ** 2 + 1 == state_dict["visual.attnpool.positional_embedding"].shape[0]
        image_size = output_width * 32

    embed_dim = state_dict["text_projection"].shape[1]
    context_length = state_dict["positional_embedding"].shape[0]
    vocab_size = state_dict["token_embedding.weight"].shape[0]
    transformer_width = state_dict["ln_final.weight"].shape[0]
    transformer_heads = transformer_width // 64
    transformer_layers = len(set(k.split(".")[2] for k in state_dict if k.startswith(f"transformer.resblocks")))

    vision_cfg = CLIPVisionCfg(
        layers=vision_layers,
        width=vision_width,
        patch_size=vision_patch_size,
        image_size=image_size,
    )
    text_cfg = CLIPTextCfg(
        context_length=context_length,
        vocab_size=vocab_size,
        width=transformer_width,
        heads=transformer_heads,
        layers=transformer_layers,
    )
    model = CLIP(
        embed_dim,
        vision_cfg=vision_cfg,
        text_cfg=text_cfg,
        quick_gelu=quick_gelu,  # OpenAI models were trained with QuickGELU
        cast_dtype=cast_dtype,
    )

    for key in ["input_resolution", "context_length", "vocab_size"]:
        state_dict.pop(key, None)

    convert_weights_to_fp16(model)  # OpenAI state dicts are partially converted to float16
    model.load_state_dict(state_dict)
    return model.eval()
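A hedged sketch of driving this builder from a checkpoint on disk; the filename is an assumption, and the state dict is pulled out of the TorchScript archive that OpenAI checkpoints ship as:

import torch

jit_archive = torch.jit.load("ViT-B-32.pt", map_location="cpu")  # assumed path
model = build_model_from_openai_state_dict(jit_archive.state_dict())
# The returned model is already in eval mode with fp16 weights.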


def trace_model(model, batch_size=256, device=torch.device('cpu')):
    model.eval()
    image_size = model.visual.image_size
    example_images = torch.ones((batch_size, 3, image_size, image_size), device=device)
    example_text = torch.zeros((batch_size, model.context_length), dtype=torch.int, device=device)
    model = torch.jit.trace_module(
        model,
        inputs=dict(
            forward=(example_images, example_text),
            encode_text=(example_text,),
            encode_image=(example_images,)
        ))
    model.visual.image_size = image_size
    return model
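Usage is a one-liner; batch_size only shapes the dummy tensors used for tracing, and the output path below is hypothetical:

traced = trace_model(model, batch_size=2)  # `model` is assumed to exist
torch.jit.save(traced, "clip_traced.pt")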


def resize_pos_embed(state_dict, model, interpolation: str = 'bicubic', antialias: bool = True):
    # Rescale the grid of position embeddings when loading from state_dict
    old_pos_embed = state_dict.get('visual.positional_embedding', None)
    if old_pos_embed is None or not hasattr(model.visual, 'grid_size'):
        return
    grid_size = to_2tuple(model.visual.grid_size)
    extra_tokens = 1  # FIXME detect different token configs (ie no class token, or more)
    new_seq_len = grid_size[0] * grid_size[1] + extra_tokens
    if new_seq_len == old_pos_embed.shape[0]:
        return

    if extra_tokens:
        pos_emb_tok, pos_emb_img = old_pos_embed[:extra_tokens], old_pos_embed[extra_tokens:]
    else:
        pos_emb_tok, pos_emb_img = None, old_pos_embed
    old_grid_size = to_2tuple(int(math.sqrt(len(pos_emb_img))))

    logging.info('Resizing position embedding grid-size from %s to %s', old_grid_size, grid_size)
    pos_emb_img = pos_emb_img.reshape(1, old_grid_size[0], old_grid_size[1], -1).permute(0, 3, 1, 2)
    pos_emb_img = F.interpolate(
        pos_emb_img,
        size=grid_size,
        mode=interpolation,
        antialias=antialias,
        align_corners=False,
    )
    pos_emb_img = pos_emb_img.permute(0, 2, 3, 1).reshape(1, grid_size[0] * grid_size[1], -1)[0]
    if pos_emb_tok is not None:
        new_pos_embed = torch.cat([pos_emb_tok, pos_emb_img], dim=0)
    else:
        new_pos_embed = pos_emb_img
    state_dict['visual.positional_embedding'] = new_pos_embed
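The function mutates the state dict in place and returns nothing, so the intended call site looks roughly like this (paths and resolutions are assumptions):

import torch

# e.g. a checkpoint trained at 224px loaded into a model built for 336px
state_dict = torch.load("clip_224px.pt", map_location="cpu")  # assumed path
resize_pos_embed(state_dict, model)  # rewrites 'visual.positional_embedding'
model.load_state_dict(state_dict)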
open_clip/src/open_clip/model_configs/RN101-quickgelu.json
DELETED
@@ -1,22 +0,0 @@
{
    "embed_dim": 512,
    "quick_gelu": true,
    "vision_cfg": {
        "image_size": 224,
        "layers": [
            3,
            4,
            23,
            3
        ],
        "width": 64,
        "patch_size": null
    },
    "text_cfg": {
        "context_length": 77,
        "vocab_size": 49408,
        "width": 512,
        "heads": 8,
        "layers": 12
    }
}
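Each of these JSON files is a model definition; the factory resolves a model name to the config file of the same name (minus the .json suffix). A hedged sketch of instantiating this one through the public open_clip API:

import open_clip

model, _, preprocess = open_clip.create_model_and_transforms("RN101-quickgelu")
tokenizer = open_clip.get_tokenizer("RN101-quickgelu")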
open_clip/src/open_clip/model_configs/RN101.json
DELETED
@@ -1,21 +0,0 @@
{
    "embed_dim": 512,
    "vision_cfg": {
        "image_size": 224,
        "layers": [
            3,
            4,
            23,
            3
        ],
        "width": 64,
        "patch_size": null
    },
    "text_cfg": {
        "context_length": 77,
        "vocab_size": 49408,
        "width": 512,
        "heads": 8,
        "layers": 12
    }
}
open_clip/src/open_clip/model_configs/RN50-quickgelu.json
DELETED
@@ -1,22 +0,0 @@
{
    "embed_dim": 1024,
    "quick_gelu": true,
    "vision_cfg": {
        "image_size": 224,
        "layers": [
            3,
            4,
            6,
            3
        ],
        "width": 64,
        "patch_size": null
    },
    "text_cfg": {
        "context_length": 77,
        "vocab_size": 49408,
        "width": 512,
        "heads": 8,
        "layers": 12
    }
}
open_clip/src/open_clip/model_configs/RN50.json
DELETED
@@ -1,21 +0,0 @@
{
    "embed_dim": 1024,
    "vision_cfg": {
        "image_size": 224,
        "layers": [
            3,
            4,
            6,
            3
        ],
        "width": 64,
        "patch_size": null
    },
    "text_cfg": {
        "context_length": 77,
        "vocab_size": 49408,
        "width": 512,
        "heads": 8,
        "layers": 12
    }
}
open_clip/src/open_clip/model_configs/RN50x16.json
DELETED
@@ -1,21 +0,0 @@
{
    "embed_dim": 768,
    "vision_cfg": {
        "image_size": 384,
        "layers": [
            6,
            8,
            18,
            8
        ],
        "width": 96,
        "patch_size": null
    },
    "text_cfg": {
        "context_length": 77,
        "vocab_size": 49408,
        "width": 768,
        "heads": 12,
        "layers": 12
    }
}
open_clip/src/open_clip/model_configs/RN50x4.json
DELETED
@@ -1,21 +0,0 @@
{
    "embed_dim": 640,
    "vision_cfg": {
        "image_size": 288,
        "layers": [
            4,
            6,
            10,
            6
        ],
        "width": 80,
        "patch_size": null
    },
    "text_cfg": {
        "context_length": 77,
        "vocab_size": 49408,
        "width": 640,
        "heads": 10,
        "layers": 12
    }
}
open_clip/src/open_clip/model_configs/RN50x64.json
DELETED
@@ -1,21 +0,0 @@
{
    "embed_dim": 1024,
    "vision_cfg": {
        "image_size": 448,
        "layers": [
            3,
            15,
            36,
            10
        ],
        "width": 128,
        "patch_size": null
    },
    "text_cfg": {
        "context_length": 77,
        "vocab_size": 49408,
        "width": 1024,
        "heads": 16,
        "layers": 12
    }
}
open_clip/src/open_clip/model_configs/ViT-B-16-plus-240.json
DELETED
@@ -1,16 +0,0 @@
{
    "embed_dim": 640,
    "vision_cfg": {
        "image_size": 240,
        "layers": 12,
        "width": 896,
        "patch_size": 16
    },
    "text_cfg": {
        "context_length": 77,
        "vocab_size": 49408,
        "width": 640,
        "heads": 10,
        "layers": 12
    }
}