bluemellophone committed
Commit 2192664 · unverified · 1 parent: dd0d126

Linted files

.gitignore CHANGED
@@ -9,4 +9,4 @@ cv4e_lecture13/datasets/
 coverage/
 
 __pycache__/
-docs/build
+docs/build
.pre-commit-config.yaml CHANGED
@@ -30,6 +30,7 @@ repos:
     rev: v3.1.0
     hooks:
       - id: check-ast
+      - id: check-toml
       - id: check-executables-have-shebangs
       - id: check-docstring-first
       - id: double-quote-string-fixer
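
The only behavioral addition in this file is the new check-toml hook, which fails the commit if any staged TOML file does not parse. A standalone sketch of that check follows (a hypothetical equivalent using the stdlib tomllib, Python 3.11+; the real hook ships with pre-commit/pre-commit-hooks):

# Hypothetical stand-in for pre-commit's check-toml: each file must parse as TOML.
import sys
import tomllib  # standard library since Python 3.11


def check_toml(path):
    try:
        with open(path, 'rb') as handle:
            tomllib.load(handle)
        return True
    except tomllib.TOMLDecodeError as err:
        print(f'{path}: {err}', file=sys.stderr)
        return False


if __name__ == '__main__':
    results = [check_toml(path) for path in sys.argv[1:]]
    sys.exit(0 if all(results) else 1)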
LICENSE CHANGED
@@ -18,4 +18,4 @@ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE
+SOFTWARE
README.md CHANGED
@@ -2,8 +2,8 @@
 title: CV4Ecology School - Lecture 13
 metaTitle: "Serving, Hosting, and Deploying Models and Quality Control"
 emoji: 🌎
-colorFrom: red
-colorTo: purple
+colorFrom: blue
+colorTo: green
 sdk: gradio
 sdk_version: 3.1.4
 app_file: app.py
README.rst CHANGED
@@ -38,7 +38,7 @@ You need to first install Anaconda on your machine. Below are the instructions
 Once Anaconda is installed, you will need an environment and the following packages installed
 
 .. code:: bash
-
+
    # Create Environment
    conda create --name cv4e
    conda activate cv4e
@@ -46,16 +46,16 @@ Once Anaconda is installed, you will need an environment and the following packages installed
    # Install Python dependencies
    conda install pip
 
-   conda install -r requirements.txt
+   conda install -r requirements.txt
    conda install pytorch torchvision -c pytorch-nightly
 
 How to Run
 ----------
 
-The lecture materials will run as a single executable. The MNIST dataset must be downloaded from the internet for this script to run correctly, so Internet access is required at first to download the files once. It is recommended to use `ipython` and to copy sections of code into and inspecting the
+The lecture materials will run as a single executable. The MNIST dataset must be downloaded from the internet for this script to run correctly, so Internet access is required at first to download the files once. It is recommended to use `ipython` and to copy sections of code into and inspecting the
 
 .. code:: bash
-
+
    # Run with Python
    python lecture.py
 
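A side note on the quoted setup instructions (unchanged by this commit): conda install does not read pip-style requirements files, so the conda install -r requirements.txt line presumably means pip install -r requirements.txt, which is why pip is installed one line earlier. Once the environment resolves, a quick sanity check like the following (a hypothetical snippet, not part of the repo) confirms the core packages:

# Post-install sanity check for the cv4e environment (assumes torch and
# torchvision were installed per the README above).
import torch
import torchvision

print('torch', torch.__version__, '| torchvision', torchvision.__version__)
print('CUDA available:', torch.cuda.is_available())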
app.py CHANGED
@@ -1,9 +1,10 @@
-from cv4e_lecture13 import model, utils
+# -*- coding: utf-8 -*-
+import gradio as gr
 import torch
 from PIL import Image, ImageOps  # NOQA
 from torchvision.transforms import Compose, Resize, ToTensor
-import gradio as gr
 
+from cv4e_lecture13 import model, utils
 
 config = 'cv4e_lecture13/configs/mnist_resnet18.yaml'
 
@@ -11,7 +12,7 @@ log = utils.init_logging()
 cfg = utils.init_config(config, log)
 device = cfg.get('device')
 
-cfg['output'] = 'cv4e_lecture13/%s' % (cfg['output'], )
+cfg['output'] = 'cv4e_lecture13/{}'.format(cfg['output'])
 
 net, _, _ = model.load(cfg)
 net.eval()
@@ -20,10 +21,7 @@ net.eval()
 def predict(inp):
     inp = ImageOps.grayscale(inp)
 
-    transforms = Compose([
-        Resize((cfg['image_size'])),
-        ToTensor()
-    ])
+    transforms = Compose([Resize(cfg['image_size']), ToTensor()])
     inp = transforms(inp).unsqueeze(0)
     data = inp.to(device)
 
@@ -33,7 +31,10 @@ def predict(inp):
     confidences = torch.softmax(prediction[0], dim=0).cpu().numpy()
     confidences = list(enumerate(confidences))
     confidences = [
-        (str(label), float(conf) , )
+        (
+            str(label),
+            float(conf),
+        )
         for label, conf in confidences
     ]
     confidences = dict(confidences)
@@ -42,13 +43,10 @@ def predict(inp):
 
 
 interface = gr.Interface(
-    fn=predict,
+    fn=predict,
     inputs=gr.Image(type='pil'),
     outputs=gr.Label(num_top_classes=3),
-    examples=[
-        f'examples/example_{index}.jpg'
-        for index in range(1, 31)
-    ]
+    examples=[f'examples/example_{index}.jpg' for index in range(1, 31)],
 )
 
 interface.launch()
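
For context on why predict returns a dict: gr.Label consumes a {label: confidence} mapping and renders the top num_top_classes entries. Below is a toy version of that contract, with a dummy classifier standing in for the MNIST model (gradio 3.x, matching the sdk_version pinned in README.md):

# Toy sketch of the gr.Label contract; the dict below stands in for the real
# softmax confidences computed in predict() above.
import gradio as gr


def predict(image):
    return {'0': 0.7, '1': 0.2, '2': 0.1}  # {class label: confidence}


demo = gr.Interface(
    fn=predict,
    inputs=gr.Image(type='pil'),
    outputs=gr.Label(num_top_classes=3),
)

if __name__ == '__main__':
    demo.launch()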
cv4e_lecture13/__init__.py CHANGED
@@ -1,3 +1,4 @@
+# -*- coding: utf-8 -*-
 '''
 2022 Benjamin Kellenberger
 '''
cv4e_lecture13/configs/mnist_resnet18.yaml CHANGED
@@ -18,4 +18,4 @@ learning_rate: 0.001
 weight_decay: 0.001
 
 # output params
-output: models
+output: models
cv4e_lecture13/dataset.py CHANGED
@@ -1,13 +1,15 @@
+# -*- coding: utf-8 -*-
 '''
 Model implementation.
 We'll be using a "simple" ResNet-18 for image classification here.
 
 2022 Benjamin Kellenberger
 '''
+from os.path import abspath
+
 import torch
 from torchvision import datasets
 from torchvision.transforms import Compose, Resize, ToTensor
-from os.path import abspath
 
 
 def load(cfg):
@@ -20,32 +22,26 @@ def load(cfg):
 
     train = torch.utils.data.DataLoader(
         datasets.MNIST(
-            root,
-            train=True,
-            transform=Compose([
-                Resize((cfg['image_size'])),
-                ToTensor()
-            ]),
-            download=True
-        ),
-        batch_size=cfg.get('batch_size'),
-        shuffle=True,
-        num_workers=cfg.get('num_workers')
+            root,
+            train=True,
+            transform=Compose([Resize(cfg['image_size']), ToTensor()]),
+            download=True,
+        ),
+        batch_size=cfg.get('batch_size'),
+        shuffle=True,
+        num_workers=cfg.get('num_workers'),
    )
-
+
     test = torch.utils.data.DataLoader(
         datasets.MNIST(
-            root,
-            train=False,
-            transform=Compose([
-                Resize((cfg['image_size'])),
-                ToTensor()
-            ]),
-            download=True
-        ),
-        batch_size=cfg.get('batch_size'),
-        shuffle=False,
-        num_workers=cfg.get('num_workers')
+            root,
+            train=False,
+            transform=Compose([Resize(cfg['image_size']), ToTensor()]),
+            download=True,
+        ),
+        batch_size=cfg.get('batch_size'),
+        shuffle=False,
+        num_workers=cfg.get('num_workers'),
     )
 
     return train, test
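
A usage sketch of load(cfg)'s training branch with the config values inlined; 'datasets', 28, and 64 are assumptions standing in for root, cfg['image_size'], and cfg['batch_size']:

# Standalone sketch of the training DataLoader above (values are assumptions).
import torch
from torchvision import datasets
from torchvision.transforms import Compose, Resize, ToTensor

train = torch.utils.data.DataLoader(
    datasets.MNIST(
        'datasets',                                   # assumed root directory
        train=True,
        transform=Compose([Resize(28), ToTensor()]),  # image_size assumed to be 28
        download=True,                                # first run downloads MNIST
    ),
    batch_size=64,                                    # assumed batch size
    shuffle=True,                                     # shuffle only the training split
)
images, labels = next(iter(train))
print(images.shape, labels.shape)  # torch.Size([64, 1, 28, 28]) torch.Size([64])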
cv4e_lecture13/model.py CHANGED
@@ -1,3 +1,4 @@
+# -*- coding: utf-8 -*-
 '''
 Model implementation.
 We'll be using a "simple" ResNet-18 for image classification here.
@@ -5,17 +6,17 @@
 2022 Benjamin Kellenberger
 '''
 
+import glob
+import os
+from os.path import exists, split, splitext
+
+import numpy as np
 import torch
 import torch.nn as nn
 import torch.nn.functional as F
-import numpy as np
-import glob
-import os
-from os.path import split, splitext, exists
 
 
 class SmallModel(nn.Module):
-
     @classmethod
     def load(cls, cfg):
         log = cfg.get('log')
@@ -30,11 +31,7 @@ class SmallModel(nn.Module):
         filepaths = sorted(glob.glob(f'{output}/*.pt'))
 
         if len(filepaths) > 1:
-            filepaths = [
-                filepath
-                for filepath in filepaths
-                if 'best.pt' not in filepath
-            ]
+            filepaths = [filepath for filepath in filepaths if 'best.pt' not in filepath]
 
         if len(filepaths):
             filepath = filepaths[-1]
@@ -49,7 +46,7 @@ class SmallModel(nn.Module):
             epoch = int(splitext(filename)[0])
         except ValueError:
             pass
-
+
         filepath = f'{output}/best.pt'
         if exists(filepath):
             state = torch.load(open(filepath, 'rb'), map_location='cpu')
@@ -73,7 +70,7 @@ class SmallModel(nn.Module):
     def forward(self, x):
         x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))
         x = F.max_pool2d(F.relu(self.conv2(x)), 2)
-        x = torch.flatten(x, 1)
+        x = torch.flatten(x, 1)
         x = F.relu(self.fc1(x))
         x = F.relu(self.fc2(x))
         x = self.fc3(x)
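
The load logic above resumes from the newest numbered checkpoint while reserving best.pt for the best validation weights. A condensed sketch of that selection rule with invented paths; note that the plain lexicographic sort would place '10.pt' before '2.pt':

# Condensed sketch of SmallModel.load's checkpoint selection (paths invented).
from os.path import split, splitext

filepaths = sorted(['models/1.pt', 'models/2.pt', 'models/best.pt'])
if len(filepaths) > 1:
    filepaths = [filepath for filepath in filepaths if 'best.pt' not in filepath]

if len(filepaths):
    filepath = filepaths[-1]            # newest numbered checkpoint: 'models/2.pt'
    filename = split(filepath)[1]       # '2.pt'
    epoch = int(splitext(filename)[0])  # resume epoch counter at 2
    print(f'resuming from epoch {epoch}')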
cv4e_lecture13/train.py CHANGED
@@ -3,20 +3,20 @@
 """
 The lecture materials for Lecture 1: Dataset Prototyping and Visualization
 """
-from . import model, dataset, utils
-import torch
 import click
+import torch
 import torch.nn as nn
-from tqdm import trange
 from torch.optim import Adam
+from tqdm import trange
 
+from . import dataset, model, utils
 
 log = None
 
 
 def inference(cfg, dataloader, net, optimizer, criterion, update):
     '''
-    Our actual training function.
+    Our actual training function.
     '''
     device = cfg.get('device')
 
@@ -44,10 +44,14 @@ def inference(cfg, dataloader, net, optimizer, criterion, update):
         label_ = torch.argmax(prediction, dim=1)
         accuracy += torch.mean((label_ == labels).float()).item()
 
-        prog.set_description('[{:s}] Loss: {:.2f}; Acc: {:.2f}%'.format(type_str, loss / (index + 1), 100.0 * accuracy / (index + 1)))
+        prog.set_description(
+            '[{:s}] Loss: {:.2f}; Acc: {:.2f}%'.format(
+                type_str, loss / (index + 1), 100.0 * accuracy / (index + 1)
+            )
+        )
         prog.update(1)
     prog.close()
-
+
     loss /= total
     accuracy /= total
 
@@ -55,7 +59,9 @@ def inference(cfg, dataloader, net, optimizer, criterion, update):
 
 
 @click.command()
-@click.option('--config', help='Path to config file', default='configs/mnist_resnet18.yaml')
+@click.option(
+    '--config', help='Path to config file', default='configs/mnist_resnet18.yaml'
+)
 def lecture(config):
     """
     Main function for Lecture 1: Dataset Prototyping and Visualization
@@ -86,15 +92,19 @@ def lecture(config):
     while epoch < epochs:
         log.info(f'Epoch {epoch}/{epochs}')
 
-        loss_train, accuracy_train = inference(cfg, train, net, optimizer, criterion, update=True)
-        loss_test, accuracy_test = inference(cfg, test, net, optimizer, criterion, update=False)
+        loss_train, accuracy_train = inference(
+            cfg, train, net, optimizer, criterion, update=True
+        )
+        loss_test, accuracy_test = inference(
+            cfg, test, net, optimizer, criterion, update=False
+        )
 
         # combine stats and save
         stats = {
             'loss_train': loss_train,
             'loss_val': loss_test,
             'accuracy_train': accuracy_train,
-            'accuracy_test': accuracy_test
+            'accuracy_test': accuracy_test,
         }
 
         best = loss_test < best_loss
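
inference() doubles as the training and evaluation loop, with update toggling backpropagation; that is why lecture() calls it twice per epoch. A condensed sketch of the pattern (not the repo's exact function):

# Condensed sketch of a shared train/eval epoch in the spirit of
# inference(cfg, dataloader, net, optimizer, criterion, update).
import torch


def run_epoch(net, dataloader, optimizer, criterion, device, update):
    net.train(update)  # training mode only when weights are being updated
    loss_total = 0.0
    for data, labels in dataloader:
        data, labels = data.to(device), labels.to(device)
        with torch.set_grad_enabled(update):
            prediction = net(data)
            loss = criterion(prediction, labels)
        if update:
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        loss_total += loss.item()
    return loss_total / len(dataloader)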
cv4e_lecture13/utils.py CHANGED
@@ -1,16 +1,17 @@
+# -*- coding: utf-8 -*-
 '''
 Various utility functions used (possibly) across scripts.
 
 2022 Benjamin Kellenberger
 '''
 
-import random
-import torch
-from torch.backends import cudnn
 import logging
+import random
 from logging.handlers import TimedRotatingFileHandler
-import yaml
 
+import torch
+import yaml
+from torch.backends import cudnn
 
 DAYS = 21
 
@@ -102,7 +103,9 @@ def init_config(config, log):
     elif torch.backends.mps.is_available():
         cfg['device'] = 'mps'
     else:
-        log.warning(f'WARNING: device set to "{device}" but not available; falling back to CPU...')
+        log.warning(
+            f'WARNING: device set to "{device}" but not available; falling back to CPU...'
+        )
        cfg['device'] = 'cpu'
 
     device = cfg.get('device')
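
The reflowed warning belongs to init_config's device fallback: honor the configured device only if the backend is actually available, otherwise warn and drop to CPU. Condensed (a sketch; the mps check assumes torch >= 1.12):

# Condensed sketch of the device fallback in init_config.
import torch


def resolve_device(requested, log):
    if requested == 'cuda' and torch.cuda.is_available():
        return 'cuda'
    if requested == 'mps' and torch.backends.mps.is_available():
        return 'mps'
    if requested != 'cpu':
        log.warning(f'device set to "{requested}" but not available; falling back to CPU...')
    return 'cpu'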
docs/conf.py CHANGED
@@ -1,3 +1,4 @@
+# -*- coding: utf-8 -*-
 # Configuration file for the Sphinx documentation builder.
 #
 # This file only contains a selection of the most common options. For a full
@@ -17,47 +18,47 @@
 
 # -- Project information -----------------------------------------------------
 
-project = "CV4Ecology School, Lecture 13"
-copyright = "2022"
-author = "CV4EcologySchool"
+project = 'CV4Ecology School, Lecture 13'
+copyright = '2022'
+author = 'CV4EcologySchool'
 
 
 # -- General configuration ---------------------------------------------------
 # -- General configuration
 
 extensions = [
-    "sphinx.ext.duration",
-    "sphinx.ext.doctest",
-    "sphinx.ext.autodoc",
-    "sphinx.ext.autosummary",
-    "sphinx.ext.intersphinx",
+    'sphinx.ext.duration',
+    'sphinx.ext.doctest',
+    'sphinx.ext.autodoc',
+    'sphinx.ext.autosummary',
+    'sphinx.ext.intersphinx',
 ]
 
 intersphinx_mapping = {
-    "rtd": ("https://docs.readthedocs.io/en/stable/", None),
-    "python": ("https://docs.python.org/3/", None),
-    "sphinx": ("https://www.sphinx-doc.org/en/master/", None),
+    'rtd': ('https://docs.readthedocs.io/en/stable/', None),
+    'python': ('https://docs.python.org/3/', None),
+    'sphinx': ('https://www.sphinx-doc.org/en/master/', None),
 }
-intersphinx_disabled_domains = ["std"]
+intersphinx_disabled_domains = ['std']
 
-templates_path = ["_templates"]
+templates_path = ['_templates']
 
 # -- Options for EPUB output
-epub_show_urls = "footnote"
+epub_show_urls = 'footnote'
 
 # List of patterns, relative to source directory, that match files and
 # directories to ignore when looking for source files.
 # This pattern also affects html_static_path and html_extra_path.
-exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
+exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
 
 # -- Options for HTML output -------------------------------------------------
 
 # The theme to use for HTML and HTML Help pages. See the documentation for
 # a list of builtin themes.
 #
-html_theme = "sphinx_rtd_theme"
+html_theme = 'sphinx_rtd_theme'
 
 # Add any paths that contain custom static files (such as style sheets) here,
 # relative to this directory. They are copied after the builtin static files,
 # so a file named "default.css" will overwrite the builtin "default.css".
-html_static_path = ["_static"]
+html_static_path = ['_static']
requirements.txt CHANGED
@@ -4,13 +4,13 @@ Pillow
 numpy
 PyYAML
 tqdm
-pre-commit
+pre-commit
 cryptography
 argparse
 gradio
-numpy
-ipython
-rich
+numpy
+ipython
+rich
 click
 brunette
 pytest
setup.cfg CHANGED
@@ -8,7 +8,7 @@ author = CV4EcologySchool
 author_email = cv4ecology@caltech.edu,
 license = MIT
 license_file = LICENSE
-project_urls =
+project_urls =
     Documentation = https://cv4e_lecture13.readthedocs.io
     Source = https://github.com/CV4EcologySchool
 
setup.py CHANGED
@@ -2,5 +2,5 @@
 # -*- coding: utf-8 -*-
 import setuptools
 
-if __name__ == "__main__":
+if __name__ == '__main__':
     setuptools.setup()
tests/__init__.py CHANGED
@@ -1 +0,0 @@
-# -*- coding: utf-8 -*-
tests/conftest.py CHANGED
@@ -1,5 +1,6 @@
 # -*- coding: utf-8 -*-
 import logging
+
 import pytest
 
 log = logging.getLogger('pytest.conftest')  # pylint: disable=invalid-name
@@ -13,10 +14,11 @@ def config():
 @pytest.fixture()
 def cfg(config):
     from cv4e_lecture13 import utils
+
     log = utils.init_logging()
     cfg = utils.init_config(config, log)
 
-    cfg['output'] = 'cv4e_lecture13/%s' % (cfg['output'], )
+    cfg['output'] = 'cv4e_lecture13/{}'.format(cfg['output'])
 
     return cfg
 
@@ -31,6 +33,7 @@ def device(cfg):
 @pytest.fixture()
 def net(cfg):
     from cv4e_lecture13 import model
+
     net, _, _ = model.load(cfg)
     net.eval()
 
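For readers new to pytest fixtures: they are injected by parameter name, so cfg above receives the config fixture, and tests receive cfg, device, or net the same way. A toy illustration of the mechanism (hypothetical, not repo code):

# Toy illustration of pytest fixture chaining (hypothetical, not repo code).
import pytest


@pytest.fixture()
def config():
    return 'cv4e_lecture13/configs/mnist_resnet18.yaml'


@pytest.fixture()
def cfg(config):
    # Stand-in for utils.init_config(config, log).
    return {'config_path': config, 'image_size': 28}


def test_cfg_paths(cfg):
    assert cfg['config_path'].endswith('.yaml')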
tests/test_model.py CHANGED
@@ -1,7 +1,7 @@
 # -*- coding: utf-8 -*-
-from torchvision.transforms import Compose, Resize, ToTensor
-from PIL import Image, ImageOps
 import torch
+from PIL import Image, ImageOps
+from torchvision.transforms import Compose, Resize, ToTensor
 
 
 def test_architecture_params(net):
@@ -14,10 +14,7 @@ def test_model_prediction(cfg, device, net):
 
     image = ImageOps.grayscale(image)
 
-    transforms = Compose([
-        Resize((cfg['image_size'])),
-        ToTensor()
-    ])
+    transforms = Compose([Resize(cfg['image_size']), ToTensor()])
     image = transforms(image).unsqueeze(0)
     data = image.to(device)
 
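
The one-line Compose here is the same preprocessing app.py uses, so the test exercises the serving path end to end. A self-contained sketch of the shapes it produces, with (28, 28) assumed in place of cfg['image_size']:

# Self-contained sketch of the grayscale -> resize -> tensor pipeline
# ((28, 28) is an assumption standing in for cfg['image_size']).
from PIL import Image, ImageOps
from torchvision.transforms import Compose, Resize, ToTensor

image = Image.new('RGB', (100, 80), color='white')  # stand-in for an example digit
image = ImageOps.grayscale(image)                   # PIL mode 'L', one channel
transforms = Compose([Resize((28, 28)), ToTensor()])
data = transforms(image).unsqueeze(0)               # add the batch dimension
print(data.shape)                                   # torch.Size([1, 1, 28, 28])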