bluemellophone commited on
Commit
8ee14ff
·
unverified ·
0 Parent(s):

Initial commit

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
.coveragerc ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [run]
2
+ branch = True
3
+ source =
4
+ cv4e_lecture13
5
+ omit =
6
+ setup.py
7
+
8
+ [report]
9
+ exclude_lines =
10
+ pragma: no cover
11
+ # NOCC
12
+ raise NotImplementedError
13
+ if __name__ == .__main__.:
14
+ precision = 1
15
+ ignore_errors = True
16
+ omit =
17
+ tests/*
18
+
19
+ [html]
20
+ directory = ./coverage/html/
21
+
22
+ [xml]
23
+ output = ./coverage/coverage.xml
.gitattributes ADDED
File without changes
.github/workflows/codeql-analysis.yml ADDED
@@ -0,0 +1,34 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: "CodeQL"
2
+
3
+ on:
4
+ push:
5
+ branches: [ main ]
6
+ pull_request:
7
+ branches: [ main ]
8
+ schedule:
9
+ - cron: '0 0 * * 1'
10
+
11
+ jobs:
12
+ analyze:
13
+ name: Analyze
14
+ runs-on: ubuntu-latest
15
+
16
+ strategy:
17
+ fail-fast: false
18
+ matrix:
19
+ language: [ 'python' ]
20
+ steps:
21
+ - name: Checkout repository
22
+ uses: actions/checkout@v2
23
+
24
+ # Initializes the CodeQL tools for scanning.
25
+ - name: Initialize CodeQL
26
+ uses: github/codeql-action/init@v1
27
+ with:
28
+ languages: ${{ matrix.language }}
29
+
30
+ - name: Autobuild
31
+ uses: github/codeql-action/autobuild@v1
32
+
33
+ - name: Perform CodeQL Analysis
34
+ uses: github/codeql-action/analyze@v1
.github/workflows/docker-publish.yaml ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: Docker
2
+
3
+ on:
4
+ pull_request:
5
+ branches:
6
+ - main
7
+ push:
8
+ branches:
9
+ - main
10
+ tags:
11
+ - v*
12
+ schedule:
13
+ - cron: '0 16 * * *' # Every day at 16:00 UTC (~09:00 PT)
14
+
15
+ jobs:
16
+ deploy:
17
+ name: Build Docker Image
18
+ runs-on: ubuntu-latest
19
+
20
+ steps:
21
+ - uses: actions/checkout@v2
22
+
23
+ - name: Build
24
+ run: |
25
+ docker build . -t cv4e/lecture13:latest
.github/workflows/python-publish.yml ADDED
@@ -0,0 +1,74 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: Wheel
2
+
3
+ # Build on every branch push, tag push, and pull request change:
4
+ on: push
5
+
6
+ jobs:
7
+
8
+ build_wheels:
9
+ name: Build on ${{ matrix.os }}
10
+ runs-on: ${{ matrix.os }}
11
+ strategy:
12
+ fail-fast: false
13
+ matrix:
14
+ os: [ubuntu-latest, macos-latest]
15
+ python-version: [3.8]
16
+
17
+ steps:
18
+ - uses: actions/checkout@v2
19
+ with:
20
+ # This allows the setuptools_scm library to discover the tag version from git
21
+ fetch-depth: 0
22
+
23
+ - uses: actions/setup-python@v2
24
+ name: Install Python
25
+ with:
26
+ python-version: ${{ matrix.python-version }}
27
+
28
+ - name: Build wheel
29
+ run: |
30
+ pip install --upgrade pip
31
+ pip install build
32
+ python -m build --wheel --outdir dist/ .
33
+
34
+ - uses: actions/upload-artifact@v2
35
+ with:
36
+ path: ./dist/*.whl
37
+
38
+ build_sdist:
39
+ name: Build source distribution
40
+ runs-on: ubuntu-latest
41
+ steps:
42
+ - uses: actions/checkout@v2
43
+
44
+ - uses: actions/setup-python@v2
45
+ name: Install Python
46
+ with:
47
+ python-version: '3.8'
48
+
49
+ - name: Build sdist
50
+ run: |
51
+ pip install --upgrade pip
52
+ pip install build
53
+ python -m build --sdist --outdir dist/ .
54
+
55
+ - uses: actions/upload-artifact@v2
56
+ with:
57
+ path: ./dist/*.tar.gz
58
+
59
+ # upload_pypi:
60
+ # needs: [build_wheels, build_sdist]
61
+ # runs-on: ubuntu-latest
62
+ # # upload to PyPI on every tag starting with 'v'
63
+ # if: github.event_name == 'push' && startsWith(github.event.ref, 'refs/tags/v')
64
+ # steps:
65
+ # - uses: actions/download-artifact@v2
66
+ # with:
67
+ # name: artifact
68
+ # path: dist
69
+
70
+ # - uses: pypa/gh-action-pypi-publish@master
71
+ # with:
72
+ # user: __token__
73
+ # password: ${{ secrets.PYPI_PASSWORD }}
74
+ # # To test: repository_url: https://test.pypi.org/legacy/
.github/workflows/testing.yml ADDED
@@ -0,0 +1,49 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # This workflow will install Python dependencies, run tests and lint with a single version of Python
2
+ # For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions
3
+
4
+ name: Testing
5
+
6
+ on: push
7
+
8
+ jobs:
9
+ test:
10
+ runs-on: ubuntu-latest
11
+ strategy:
12
+ fail-fast: false
13
+ matrix:
14
+ # Use the same Python version used the Dockerfile
15
+ python-version: [3.9]
16
+ env:
17
+ OS: ubuntu-latest
18
+ PYTHON: ${{ matrix.python-version }}
19
+ steps:
20
+ # Checkout and env setup
21
+ - uses: actions/checkout@v2
22
+
23
+ - name: Set up Python ${{ matrix.python-version }}
24
+ uses: actions/setup-python@v2
25
+ with:
26
+ python-version: ${{ matrix.python-version }}
27
+
28
+ - name: Install dependencies
29
+ run: |
30
+ python -m pip install --upgrade pip
31
+ pip install -r requirements.txt
32
+
33
+ - name: Lint with flake8
34
+ run: |
35
+ # stop the build if there are Python syntax errors or undefined names
36
+ flake8 . --count --show-source --statistics
37
+ # exit-zero treats all errors as warnings.
38
+ flake8 . --count --exit-zero --max-complexity=10 --statistics
39
+
40
+ - name: Run tests
41
+ run: |
42
+ set -ex
43
+ pytest --cov=./ --cov-append --random-order-seed=1
44
+
45
+ - name: Run coverage
46
+ run: |
47
+ coverage xml html
48
+ env:
49
+ LOG_WIDTH: 120
.gitignore ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ .DS_Store
2
+ output.*.jpg
3
+ *.log*
4
+
5
+ *.egg-info/
6
+
7
+ cv4e_lecture13/datasets/
8
+ .coverage
9
+ coverage/
10
+
11
+ __pycache__/
12
+ docs/build
.pre-commit-config.yaml ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # See https://pre-commit.com for more information
2
+ # See https://pre-commit.com/hooks.html for more hooks
3
+ repos:
4
+ - repo: https://github.com/asottile/pyupgrade
5
+ rev: v2.32.0
6
+ hooks:
7
+ - id: pyupgrade
8
+ name: pyupgrade
9
+ description: Run PyUpgrade on Python code.
10
+ - repo: https://github.com/pycqa/isort
11
+ rev: 5.10.1
12
+ hooks:
13
+ - id: isort
14
+ args: [--settings-path setup.cfg]
15
+ name: isort
16
+ description: Run import sorting (isort) on Python code.
17
+ - repo: local
18
+ hooks:
19
+ - id: brunette
20
+ name: brunette
21
+ description: Run Brunette on Python code (fork of Black).
22
+ entry: brunette --config=setup.cfg
23
+ language: system
24
+ types: [python]
25
+ - repo: https://gitlab.com/pycqa/flake8
26
+ rev: 3.8.3
27
+ hooks:
28
+ - id: flake8
29
+ - repo: https://github.com/pre-commit/pre-commit-hooks
30
+ rev: v3.1.0
31
+ hooks:
32
+ - id: check-ast
33
+ - id: check-executables-have-shebangs
34
+ - id: check-docstring-first
35
+ - id: double-quote-string-fixer
36
+ - id: trailing-whitespace
37
+ - id: mixed-line-ending
38
+ - id: end-of-file-fixer
39
+ - id: fix-encoding-pragma
.readthedocs.yaml ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # .readthedocs.yaml
2
+ # Read the Docs configuration file
3
+ # See https://docs.readthedocs.io/en/stable/config-file/v2.html for details
4
+
5
+ # Required
6
+ version: 2
7
+
8
+ # Set the version of Python and other tools you might need
9
+ build:
10
+ os: ubuntu-22.04
11
+ tools:
12
+ python: "3.10"
13
+
14
+ # Build documentation in the docs/ directory with Sphinx
15
+ sphinx:
16
+ configuration: docs/conf.py
17
+
18
+ # Optionally declare the Python requirements required to build your docs
19
+ python:
20
+ install:
21
+ - requirements: docs/requirements.txt
22
+ - method: pip
23
+ path: .
CONTRIBUTING.md ADDED
@@ -0,0 +1,40 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Contributing
2
+
3
+ All contributions are welcome and encouraged. There are a few guidelines and styling aspects that we require and encourage you to use so that we might see this project through many years of successful development.
4
+
5
+ ## Development Guidelines
6
+
7
+ ### Pull Request Checklist
8
+
9
+ To submit a pull request (PR), we require the following standards to be enforced. Details on how to configure and pass each of these required checks is detailed in the sections in this guideline section.
10
+
11
+ * **Ensure that the PR is properly formatted**
12
+ * **Ensure that the PR is properly rebased**
13
+ * **Ensure that the PR is properly tested**
14
+ * **Ensure that the PR is properly covered**
15
+ * **Ensure that the PR is properly sanitized**
16
+ * **Ensure that the PR is properly reviewed**
17
+
18
+ ## Code Style
19
+
20
+ The code is generally in PEP8 compliance, enforced by flake8 via pre-commit.
21
+
22
+ Our code uses Google-style docstrings. See examples of this in [Example Google Style Python Docstrings](https://www.sphinx-doc.org/en/master/usage/extensions/example_google.html#example-google).
23
+
24
+ ### Pre-commit
25
+
26
+ It's recommended that you use `pre-commit` to ensure linting procedures are run
27
+ on any commit you make. (See also [pre-commit.com](https://pre-commit.com/))
28
+
29
+ Reference [pre-commit's installation instructions](https://pre-commit.com/#install) for software installation on your OS/platform. After you have the software installed, run ``pre-commit install`` on the command line. Now every time you commit to this project's code base the linter procedures will automatically run over the changed files. To run pre-commit on files preemptively from the command line use:
30
+
31
+ ```bash
32
+ git add .
33
+ pre-commit run
34
+
35
+ # or
36
+
37
+ pre-commit run --all-files
38
+ ```
39
+
40
+ See `.pre-commit-config.yaml` for a list of configured linters and fixers.
Dockerfile ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# FROM nvidia/cuda:11.3.1-cudnn8-runtime-ubuntu20.04
FROM continuumio/anaconda3:latest

ENV GRADIO_SERVER_PORT 5000

# Install apt packages
RUN set -ex \
    && apt-get update \
    && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
        git \
        htop \
    && rm -rf /var/cache/apt \
    && rm -rf /var/lib/apt/lists/*

WORKDIR /code

COPY ./ /code

RUN conda install pip \
    && pip install -r requirements.txt

# Port for the web server
EXPOSE 5000

# Exec-form ENTRYPOINT takes one argument per list element. The original
# single-string form ["python app.py"] tries to exec a binary literally
# named "python app.py" and fails at container start.
ENTRYPOINT ["python", "app.py"]
LICENSE ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ MIT License
2
+
3
+ Copyright (c) ECEO, EPFL. All rights reserved.
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
MANIFEST.in ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ include pyproject.toml
2
+
3
+ # Include the README and SECURITY documents
4
+ include *.rst
5
+
6
+ # Include the license file
7
+ include LICENSE
README.rst ADDED
@@ -0,0 +1,109 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ ================================================
3
+ Lecture 1: Dataset Prototyping and Visualization
4
+ ================================================
5
+
6
+ .. contents:: Quick Links
7
+ :backlinks: none
8
+
9
+ .. sectnum::
10
+
11
+ Introduction
12
+ ------------
13
+
14
+ .. image:: https://github.com/CV4EcologySchool/Lecture-1/raw/main/intro.jpg
15
+ :target: https://docs.google.com/presentation/d/1Bm9ZvQC6Y1SW_xAHHbMvhsRRb87tgzIimM0YKEXEA6w/edit?usp=sharing
16
+ :alt: "Lecture 1: Dataset Prototyping and Visualization"
17
+
18
+ This repository holds the lecture materials for the `Computer Vision for Ecology workshop at CalTech <https://cv4ecology.caltech.edu>`_. The goal of this lecture is to describe which qualities are ideal for prototype ML datasets and to review PyTorch's DataLoaders and Tensors. Lecture 1 also reviews the overall lecture structure for the three-week course, the milestone due tomorrow (Week 1, Day 2), and the tools, technologies, and terms that are common for ML applications.
19
+
20
+ The associated slides for this lecture can be viewed by clicking on the image above.
21
+
22
+ How to Install
23
+ --------------
24
+
25
+ You need to first install Anaconda on your machine. Below are the instructions on how to install Anaconda on an Apple macOS machine, but it is possible to install on a Windows and Linux machine as well. Consult the `official Anaconda page <https://www.anaconda.com>`_ to download and install on other systems. For Windows computers, it is highly recommended that you install the `Windows Subsystem for Linux <https://docs.microsoft.com/en-us/windows/wsl/install>`_.
26
+
27
+ .. code:: bash
28
+
29
+ # Install Homebrew
30
+ /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)"
31
+
32
+ # Install Anaconda and expose conda to the terminal
33
+ brew install anaconda
34
+ export PATH="/opt/homebrew/anaconda3/bin:$PATH"
35
+ conda init zsh
36
+ conda update conda
37
+
38
+ Once Anaconda is installed, you will need an environment and the following packages installed
39
+
40
+ .. code:: bash
41
+
42
+ # Create Environment
43
+ conda create --name cv4e
44
+ conda activate cv4e
45
+
46
+ # Install Python dependencies
47
+ conda install pip
48
+
49
+ pip install -r requirements.txt
50
+ conda install pytorch torchvision -c pytorch-nightly
51
+
52
+ How to Run
53
+ ----------
54
+
55
+ The lecture materials will run as a single executable. The MNIST dataset must be downloaded from the internet for this script to run correctly, so Internet access is required at first to download the files once. It is recommended to use `ipython` and to copy sections of code into the interpreter, inspecting the output as you go.
56
+
57
+ .. code:: bash
58
+
59
+ # Run with Python
60
+ python lecture.py
61
+
62
+ # Run with iPython
63
+ ipython lecture.py
64
+
65
+ # Run as an executable
66
+ ./lecture.py
67
+
68
+ Logging
69
+ -------
70
+
71
+ The script uses Python's built-in logging functionality called `logging`. All print functions are replaced with `log.info` within this script, which sends the output to two places: 1) the terminal window, 2) the file `lecture_1.log`. Get into the habit of writing text logs and keeping date-specific versions for comparison and debugging.
72
+
73
+ Code Formatting (Optional)
74
+ --------------------------
75
+
76
+ It's recommended that you use ``pre-commit`` to ensure linting procedures are run
77
+ on any code you write. (See also `pre-commit.com <https://pre-commit.com/>`_)
78
+
79
+ Reference `pre-commit's installation instructions <https://pre-commit.com/#install>`_ for software installation on your OS/platform. After you have the software installed, run ``pre-commit install`` on the command line. Now every time you commit to this project's code base the linter procedures will automatically run over the changed files. To run pre-commit on files preemtively from the command line use:
80
+
81
+ .. code:: bash
82
+
83
+ git add .
84
+ pre-commit run
85
+
86
+ # or
87
+
88
+ pre-commit run --all-files
89
+
90
+ The code base has been formatted by Brunette, which is a fork and more configurable version of Black (https://black.readthedocs.io/en/stable/). Furthermore, try to conform to PEP8. You should set up your preferred editor to use flake8 as its Python linter, but pre-commit will ensure compliance before a git commit is completed. This will use the flake8 configuration within ``setup.cfg``, which ignores several errors and stylistic considerations. See the ``setup.cfg`` file for a full and accurate listing of stylistic codes to ignore.
91
+
92
+ See Also
93
+ --------
94
+
95
+ - https://github.com/readthedocs-examples/example-sphinx-basic/
96
+ - https://github.com/CV4EcologySchool/ct_classifier
97
+ - https://docs.python.org/3/distutils/setupscript.html
98
+
99
+
100
+
101
+ brew install openssl
102
+
103
+ docker build . -t cv4e/lecture13:latest
104
+
105
+ pytest
106
+
107
+ coverage html
108
+
109
+ sphinx-build -M html . _build/
app.py ADDED
@@ -0,0 +1,54 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from cv4e_lecture13 import model, utils
2
+ import torch
3
+ from PIL import Image, ImageOps # NOQA
4
+ from torchvision.transforms import Compose, Resize, ToTensor
5
+ import gradio as gr
6
+
7
+
8
+ config = 'cv4e_lecture13/configs/mnist_resnet18.yaml'
9
+
10
+ log = utils.init_logging()
11
+ cfg = utils.init_config(config, log)
12
+ device = cfg.get('device')
13
+
14
+ cfg['output'] = 'cv4e_lecture13/%s' % (cfg['output'], )
15
+
16
+ net, _, _ = model.load(cfg)
17
+ net.eval()
18
+
19
+
20
def predict(inp):
    """Classify a digit image and return a label->confidence mapping.

    Args:
        inp: a PIL image (any mode); converted to grayscale before inference.

    Returns:
        dict: string digit label ('0'..'9') mapped to its softmax confidence.
    """
    image = ImageOps.grayscale(inp)

    preprocess = Compose([
        Resize((cfg['image_size'])),
        ToTensor()
    ])
    # Add a leading batch dimension and move to the configured device.
    batch = preprocess(image).unsqueeze(0)
    data = batch.to(device)

    with torch.no_grad():
        prediction = net(data)

    # Softmax over the single sample's logits; keys must be strings for gr.Label.
    scores = torch.softmax(prediction[0], dim=0).cpu().numpy()
    return {str(label): float(score) for label, score in enumerate(scores)}
42
+
43
+
44
+ interface = gr.Interface(
45
+ fn=predict,
46
+ inputs=gr.Image(type='pil'),
47
+ outputs=gr.Label(num_top_classes=3),
48
+ examples=[
49
+ f'examples/example_{index}.jpg'
50
+ for index in range(1, 31)
51
+ ]
52
+ )
53
+
54
+ interface.launch()
cv4e_lecture13/__init__.py ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ '''
2
+ 2022 Benjamin Kellenberger
3
+ '''
4
+
5
+ # Info: these "__init__.py" files go into every folder and subfolder that
6
+ # contains Python code. It is required for Python to find all the scripts you
7
+ # created and for you to be able to import them.
8
+ version = '0.1.0'
9
+ __version__ = version
cv4e_lecture13/configs/mnist_resnet18.yaml ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Here's where you define experiment-specific hyperparameters.
2
+ # You can also create lists and group parameters together into nested sub-parts.
3
+ # In Python, this is all read as a dict.
4
+
5
+ # environment/computational parameters
6
+ seed: 1
7
+ device: cuda
8
+ num_workers: 4
9
+
10
+ # dataset parameters
11
+ num_classes: 10
12
+
13
+ # training hyperparameters
14
+ image_size: [32, 32]
15
+ max_epochs: 16
16
+ batch_size: 128
17
+ learning_rate: 0.001
18
+ weight_decay: 0.001
19
+
20
+ # output params
21
+ output: models
cv4e_lecture13/dataset.py ADDED
@@ -0,0 +1,51 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ '''
2
+ Model implementation.
3
+ We'll be using a "simple" ResNet-18 for image classification here.
4
+
5
+ 2022 Benjamin Kellenberger
6
+ '''
7
+ import torch
8
+ from torchvision import datasets
9
+ from torchvision.transforms import Compose, Resize, ToTensor
10
+ from os.path import abspath
11
+
12
+
13
def load(cfg):
    """
    Load the MNIST dataset from PyTorch (download if needed) and return a DataLoader

    MNIST is a sample dataset for machine learning, each image is 28-pixels high and 28-pixels wide (1 color channel)
    """
    root = abspath('datasets')

    def _make_loader(is_train):
        # Build one split; images are resized to cfg['image_size'] and converted
        # to tensors. Only the training split is shuffled.
        split = datasets.MNIST(
            root,
            train=is_train,
            transform=Compose([
                Resize((cfg['image_size'])),
                ToTensor()
            ]),
            download=True
        )
        return torch.utils.data.DataLoader(
            split,
            batch_size=cfg.get('batch_size'),
            shuffle=is_train,
            num_workers=cfg.get('num_workers')
        )

    train = _make_loader(True)
    test = _make_loader(False)

    return train, test
cv4e_lecture13/model.py ADDED
@@ -0,0 +1,96 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ '''
2
+ Model implementation.
3
+ We'll be using a "simple" ResNet-18 for image classification here.
4
+
5
+ 2022 Benjamin Kellenberger
6
+ '''
7
+
8
+ import torch
9
+ import torch.nn as nn
10
+ import torch.nn.functional as F
11
+ import numpy as np
12
+ import glob
13
+ import os
14
+ from os.path import split, splitext, exists
15
+
16
+
17
class SmallModel(nn.Module):
    """A small LeNet-style CNN for 1-channel 32x32 inputs with 10 output classes."""

    @classmethod
    def load(cls, cfg):
        """Create the model and resume from the newest checkpoint if one exists.

        Args:
            cfg (dict): configuration; reads keys ``log``, ``output``, ``device``.

        Returns:
            tuple: ``(net, epoch, best_loss)`` — ``epoch`` is parsed from the
            resumed checkpoint filename (0 for a fresh model) and ``best_loss``
            is the validation loss stored in ``best.pt`` (``inf`` if absent).
        """
        log = cfg.get('log')

        net = cls()

        epoch = 0
        best_loss = np.inf

        output = cfg.get('output')

        # Checkpoints are written as zero-padded epoch numbers, so a plain
        # lexicographic sort is also chronological.
        filepaths = sorted(glob.glob(f'{output}/*.pt'))

        if len(filepaths) > 1:
            # Skip the rolling "best.pt" snapshot; resume from the latest epoch file.
            filepaths = [
                filepath
                for filepath in filepaths
                if 'best.pt' not in filepath
            ]

        if len(filepaths):
            filepath = filepaths[-1]

            log.info(f'Resuming from {filepath}')

            # Pass the path directly: torch.load opens and closes the file
            # itself. The original passed an open file object and leaked it.
            state = torch.load(filepath, map_location='cpu')
            net.load_state_dict(state['model'])

            filename = split(filepath)[1]
            try:
                epoch = int(splitext(filename)[0])
            except ValueError:
                # Non-numeric checkpoint name; keep epoch = 0.
                pass

            filepath = f'{output}/best.pt'
            if exists(filepath):
                state = torch.load(filepath, map_location='cpu')
                best_loss = state['loss_val']
        else:
            log.info('Starting new network model')

        device = cfg.get('device')
        net.to(device)

        return net, epoch, best_loss

    def __init__(self):
        super().__init__()
        # Feature extractor: 1x32x32 -> 16x28x28 -> pool -> 16x14x14
        #                    -> 32x10x10 -> pool -> 32x5x5 (flattened to 800)
        self.conv1 = nn.Conv2d(1, 16, 5)
        self.conv2 = nn.Conv2d(16, 32, 5)
        self.fc1 = nn.Linear(32 * 5 * 5, 128)
        self.fc2 = nn.Linear(128, 128)
        self.fc3 = nn.Linear(128, 10)

    def forward(self, x):
        """Return raw class logits of shape (batch, 10) for input (batch, 1, 32, 32)."""
        x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))
        x = F.max_pool2d(F.relu(self.conv2(x)), 2)
        x = torch.flatten(x, 1)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)  # no softmax; CrossEntropyLoss expects logits
        return x

    def save(self, cfg, epoch, stats, best=False):
        """Checkpoint the model weights together with this epoch's stats.

        Writes ``<output>/<epoch:04d>.pt`` and, when ``best`` is True, also
        refreshes the rolling ``<output>/best.pt`` snapshot.
        """
        output = cfg.get('output')

        os.makedirs(output, exist_ok=True)

        stats['model'] = self.state_dict()

        # Pass paths so torch.save manages (and closes) the file handles;
        # the original opened file objects and never closed them.
        torch.save(stats, f'{output}/{epoch:04d}.pt')

        if best:
            torch.save(stats, f'{output}/best.pt')
93
+
94
+
95
def load(cfg):
    """Module-level convenience wrapper (mirrors ``dataset.load``); delegates to ``SmallModel.load``."""
    return SmallModel.load(cfg)
cv4e_lecture13/models/best.pt ADDED
Binary file (537 kB). View file
 
cv4e_lecture13/train.py ADDED
@@ -0,0 +1,113 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python
2
+ # -*- coding: utf-8 -*-
3
+ """
4
+ The lecture materials for Lecture 1: Dataset Prototyping and Visualization
5
+ """
6
+ from . import model, dataset, utils
7
+ import torch
8
+ import click
9
+ import torch.nn as nn
10
+ from tqdm import trange
11
+ from torch.optim import Adam
12
+
13
+
14
+ log = None
15
+
16
+
17
def inference(cfg, dataloader, net, optimizer, criterion, update):
    '''
    Run one full pass over ``dataloader``.

    When ``update`` is True this trains ``net`` (forward + backward + optimizer
    step); otherwise it only evaluates.

    Args:
        cfg (dict): configuration; reads ``device``.
        dataloader: the torch DataLoader to iterate.
        net: the model (moved to ``device`` by the caller).
        optimizer: used only when ``update`` is True.
        criterion: loss function taking (logits, labels).
        update (bool): True for a training pass, False for validation.

    Returns:
        tuple: ``(mean_loss, mean_accuracy)`` averaged over batches.
    '''
    device = cfg.get('device')

    # Proper statements instead of a side-effect-only conditional expression.
    if update:
        net.train()
    else:
        net.eval()
    type_str = 'Train' if update else 'Val'

    loss, accuracy = 0.0, 0.0
    total = len(dataloader)

    prog = trange(total)
    # Context-manager form restores the previous global autograd state on exit;
    # the original bare call left gradients globally disabled after a val pass.
    with torch.set_grad_enabled(update):
        for index, (data, labels) in enumerate(dataloader):
            data, labels = data.to(device), labels.to(device)

            prediction = net(data)
            gradient = criterion(prediction, labels)

            if update:
                optimizer.zero_grad()
                gradient.backward()
                optimizer.step()

            # log statistics
            loss += gradient.item()
            label_ = torch.argmax(prediction, dim=1)
            accuracy += torch.mean((label_ == labels).float()).item()

            prog.set_description('[{:s}] Loss: {:.2f}; Acc: {:.2f}%'.format(type_str, loss / (index + 1), 100.0 * accuracy / (index + 1)))
            prog.update(1)
    prog.close()

    # Guard against an empty dataloader (the original raised ZeroDivisionError);
    # an empty pass reports 0.0 loss and accuracy.
    total = max(total, 1)
    loss /= total
    accuracy /= total

    return loss, accuracy
55
+
56
+
57
+ @click.command()
58
+ @click.option('--config', help='Path to config file', default='configs/mnist_resnet18.yaml')
59
+ def lecture(config):
60
+ """
61
+ Main function for Lecture 1: Dataset Prototyping and Visualization
62
+ """
63
+ global log
64
+
65
+ log = utils.init_logging()
66
+
67
+ cfg = utils.init_config(config, log)
68
+
69
+ # init random number generator seed (set at the start)
70
+ utils.init_seed(cfg.get('seed', None))
71
+
72
+ ################################################################################
73
+ # Load MNIST
74
+ train, test = dataset.load(cfg)
75
+
76
+ net, epoch, best_loss = model.load(cfg)
77
+
78
+ optimizer = Adam(
79
+ net.parameters(),
80
+ lr=cfg.get('learning_rate'),
81
+ weight_decay=cfg.get('weight_decay'),
82
+ )
83
+ criterion = nn.CrossEntropyLoss()
84
+
85
+ epochs = cfg.get('max_epochs')
86
+ while epoch < epochs:
87
+ log.info(f'Epoch {epoch}/{epochs}')
88
+
89
+ loss_train, accuracy_train = inference(cfg, train, net, optimizer, criterion, update=True)
90
+ loss_test, accuracy_test = inference(cfg, test, net, optimizer, criterion, update=False)
91
+
92
+ # combine stats and save
93
+ stats = {
94
+ 'loss_train': loss_train,
95
+ 'loss_val': loss_test,
96
+ 'accuracy_train': accuracy_train,
97
+ 'accuracy_test': accuracy_test
98
+ }
99
+
100
+ best = loss_test < best_loss
101
+ net.save(cfg, epoch, stats, best=best)
102
+ if not best:
103
+ log.warning('Stopping early')
104
+ break
105
+
106
+ best_loss = loss_test
107
+ epoch += 1
108
+
109
+
110
+ if __name__ == '__main__':
111
+ # Common boiler-plating needed to run the code from the command line as `python lecture.py` or `./lecture.py`
112
+ # This if condition will be False if the file is imported
113
+ lecture()
cv4e_lecture13/utils.py ADDED
@@ -0,0 +1,111 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ '''
2
+ Various utility functions used (possibly) across scripts.
3
+
4
+ 2022 Benjamin Kellenberger
5
+ '''
6
+
7
+ import random
8
+ import torch
9
+ from torch.backends import cudnn
10
+ import logging
11
+ from logging.handlers import TimedRotatingFileHandler
12
+ import yaml
13
+
14
+
15
+ DAYS = 21
16
+
17
+
18
def init_logging():
    """
    Setup Python's built in logging functionality with on-disk logging, and prettier logging with Rich

    Installs two handlers on the root logger: a midnight-rotating file handler
    (``lecture_1.log``, keeping ``DAYS`` backups) and a Rich console handler.

    Returns:
        logging.Logger: the named logger ('lecture_1') to use for all output.
    """
    # Import Rich (function-local import; NOTE(review): presumably so the
    # module can be imported without Rich installed — confirm)
    import rich
    from rich.logging import RichHandler
    from rich.style import Style
    from rich.theme import Theme

    name = 'lecture_1'

    # Setup placeholder for logging handlers
    handlers = []

    # Configuration arguments for console, handlers, and logging
    console_kwargs = {
        'theme': Theme(
            {
                'logging.keyword': Style(bold=True, color='yellow'),
                'logging.level.notset': Style(dim=True),
                'logging.level.debug': Style(color='cyan'),
                'logging.level.info': Style(color='green'),
                'logging.level.warning': Style(color='yellow'),
                'logging.level.error': Style(color='red', bold=True),
                'logging.level.critical': Style(color='red', bold=True, reverse=True),
                'log.time': Style(color='white'),
            }
        )
    }
    handler_kwargs = {
        'rich_tracebacks': True,
        'tracebacks_show_locals': True,
    }
    logging_kwargs = {
        'level': logging.INFO,
        'format': '[%(name)s] %(message)s',
        'datefmt': '[%X]',
    }

    # Add file-based log handler (rotates at midnight, keeps DAYS backups)
    handlers.append(
        TimedRotatingFileHandler(
            filename=f'{name}.log',
            when='midnight',
            backupCount=DAYS,
        ),
    )

    # Add rich (fancy logging) log handler
    rich.reconfigure(**console_kwargs)
    handlers.append(RichHandler(**handler_kwargs))

    # Setup global logger with the handlers and set the default level to INFO
    logging.basicConfig(handlers=handlers, **logging_kwargs)
    logger = logging.getLogger()
    logger.setLevel(logging.INFO)
    log = logging.getLogger(name)

    return log
78
+
79
+
80
def init_seed(seed):
    """Seed Python's and PyTorch's RNGs for reproducible runs.

    A ``None`` seed leaves all generators untouched.

    Args:
        seed (int | None): the seed value from the config, or None to skip.
    """
    if seed is not None:
        random.seed(seed)
        # numpy.random.seed(seed) # we don't use NumPy in this code, but you would want to set its random number generator seed, too
        torch.manual_seed(seed)
        torch.cuda.manual_seed(seed)
        # Benchmark mode lets cuDNN auto-tune (and pick non-deterministic)
        # algorithms, which defeats the purpose of seeding. The original set
        # benchmark=True here, contradicting deterministic=True.
        cudnn.benchmark = False
        cudnn.deterministic = True
88
+
89
+
90
def init_config(config, log):
    """Load the YAML config file, attach the logger, and resolve the compute device.

    Device resolution: anything other than an explicit 'cpu' falls back
    through cuda -> mps -> cpu depending on availability.

    Args:
        config (str): path to the YAML configuration file.
        log: the logger returned by ``init_logging``.

    Returns:
        dict: the parsed configuration, with ``log`` attached and ``device``
        replaced by an actually-available backend.
    """
    # load config; the context manager closes the file handle (the original
    # passed an open file object to yaml and leaked it)
    log.info(f'Using config "{config}"')
    with open(config, 'r') as handle:
        cfg = yaml.safe_load(handle)

    cfg['log'] = log

    # check if GPU is available
    device = cfg.get('device')
    if device not in ['cpu']:
        if torch.cuda.is_available():
            cfg['device'] = 'cuda'
        elif torch.backends.mps.is_available():
            cfg['device'] = 'mps'
        else:
            log.warning(f'WARNING: device set to "{device}" but not available; falling back to CPU...')
            cfg['device'] = 'cpu'

    device = cfg.get('device')
    log.info(f'Using device "{device}"')

    return cfg
docs/conf.py ADDED
@@ -0,0 +1,63 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Configuration file for the Sphinx documentation builder.
2
+ #
3
+ # This file only contains a selection of the most common options. For a full
4
+ # list see the documentation:
5
+ # https://www.sphinx-doc.org/en/master/usage/configuration.html
6
+
7
+ # -- Path setup --------------------------------------------------------------
8
+
9
+ # If extensions (or modules to document with autodoc) are in another directory,
10
+ # add these directories to sys.path here. If the directory is relative to the
11
+ # documentation root, use os.path.abspath to make it absolute, like shown here.
12
+ #
13
+ # import os
14
+ # import sys
15
+ # sys.path.insert(0, os.path.abspath('.'))
16
+
17
+
18
+ # -- Project information -----------------------------------------------------
19
+
20
+ project = "CV4Ecology School, Lecture 13"
21
+ copyright = "2022"
22
+ author = "CV4EcologySchool"
23
+
24
+
25
+ # -- General configuration ---------------------------------------------------
26
+ # -- General configuration
27
+
28
+ extensions = [
29
+ "sphinx.ext.duration",
30
+ "sphinx.ext.doctest",
31
+ "sphinx.ext.autodoc",
32
+ "sphinx.ext.autosummary",
33
+ "sphinx.ext.intersphinx",
34
+ ]
35
+
36
+ intersphinx_mapping = {
37
+ "rtd": ("https://docs.readthedocs.io/en/stable/", None),
38
+ "python": ("https://docs.python.org/3/", None),
39
+ "sphinx": ("https://www.sphinx-doc.org/en/master/", None),
40
+ }
41
+ intersphinx_disabled_domains = ["std"]
42
+
43
+ templates_path = ["_templates"]
44
+
45
+ # -- Options for EPUB output
46
+ epub_show_urls = "footnote"
47
+
48
+ # List of patterns, relative to source directory, that match files and
49
+ # directories to ignore when looking for source files.
50
+ # This pattern also affects html_static_path and html_extra_path.
51
+ exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
52
+
53
+ # -- Options for HTML output -------------------------------------------------
54
+
55
+ # The theme to use for HTML and HTML Help pages. See the documentation for
56
+ # a list of builtin themes.
57
+ #
58
+ html_theme = "sphinx_rtd_theme"
59
+
60
+ # Add any paths that contain custom static files (such as style sheets) here,
61
+ # relative to this directory. They are copied after the builtin static files,
62
+ # so a file named "default.css" will overwrite the builtin "default.css".
63
+ html_static_path = ["_static"]
docs/index.rst ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ .. include:: ../README.rst
2
+
3
+ .. note::
4
+
5
+ This project is under active development.
6
+
7
+ Contents
8
+ --------
9
+
10
+ .. toctree::
11
+
12
+ Home <self>
13
+ usage
14
+ package
docs/package.rst ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Package
2
+ =======
3
+
4
+ .. toctree::
5
+ :maxdepth: 3
6
+ :caption: Contents:
7
+
8
+
9
+ dataset.py
10
+ ----------
11
+
12
+ .. automodule:: cv4e_lecture13.dataset
13
+ :members:
14
+ :undoc-members:
15
+ :show-inheritance:
16
+
17
+ model.py
18
+ ----------
19
+
20
+ .. automodule:: cv4e_lecture13.model
21
+ :members:
22
+ :undoc-members:
23
+ :show-inheritance:
24
+
25
+ train.py
26
+ --------
27
+
28
+ .. automodule:: cv4e_lecture13.train
29
+ :members:
30
+ :undoc-members:
31
+ :show-inheritance:
32
+
33
+ utils.py
34
+ --------
35
+
36
+ .. automodule:: cv4e_lecture13.utils
37
+ :members:
38
+ :undoc-members:
39
+ :show-inheritance:
docs/usage.rst ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Usage
2
+ =====
3
+
4
+ .. _installation:
5
+
6
+ Installation
7
+ ------------
8
+
9
+ To use this code, first install its dependencies using pip:
10
+
11
+ .. code-block:: console
12
+
13
+ (.venv) $ pip install -r requirements.txt
14
+
15
+ then, you can run the application via:
16
+
17
+ .. code-block:: console
18
+
19
+ (.venv) $ python app.py
examples/example_1.jpg ADDED
examples/example_10.jpg ADDED
examples/example_11.jpg ADDED
examples/example_12.jpg ADDED
examples/example_13.jpg ADDED
examples/example_14.jpg ADDED
examples/example_15.jpg ADDED
examples/example_16.jpg ADDED
examples/example_17.jpg ADDED
examples/example_18.jpg ADDED
examples/example_19.jpg ADDED
examples/example_2.jpg ADDED
examples/example_20.jpg ADDED
examples/example_21.jpg ADDED
examples/example_22.jpg ADDED
examples/example_23.jpg ADDED
examples/example_24.jpg ADDED
examples/example_25.jpg ADDED
examples/example_26.jpg ADDED
examples/example_27.jpg ADDED
examples/example_28.jpg ADDED
examples/example_29.jpg ADDED
examples/example_3.jpg ADDED
examples/example_30.jpg ADDED