import os
import pytest
import torch
import ignite.distributed as idist
from ignite.exceptions import NotComputableError
from ignite.metrics.gan.inception_score import InceptionScore
def calculate_inception_score(p_yx):
    p_y = torch.unsqueeze(p_yx.mean(axis=0), 0)
    kl_d = torch.kl_div(torch.log(p_y), p_yx)
... |
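The helper above is cut off by the row limit. For orientation, it targets the standard Inception Score, IS = exp(E_x[KL(p(y|x) || p(y))]); below is a minimal self-contained reference sketch (not the original file's continuation) that an `InceptionScore` test could be compared against.

```python
import torch

def inception_score_reference(p_yx: torch.Tensor) -> torch.Tensor:
    # p_yx: (n_samples, n_classes) class probabilities for generated samples,
    # assumed strictly positive (e.g. softmax outputs).
    p_y = p_yx.mean(dim=0, keepdim=True)            # marginal p(y)
    kl = p_yx * (torch.log(p_yx) - torch.log(p_y))  # pointwise KL terms
    return torch.exp(kl.sum(dim=1).mean())          # exp of the expected per-sample KL
```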
import os
import re
from unittest.mock import patch
import pytest
import pytorch_fid.fid_score as pytorch_fid_score
import scipy
import torch
from numpy import cov
import ignite.distributed as idist
from ignite.metrics.gan.fid import FID, fid_score
@pytest.fixture()
def mock_no_scipy():
with patch.dict("sys.mod... |
import pytest
from ignite.metrics.nlp.utils import lcs, modified_precision, ngrams
@pytest.mark.parametrize(
"sequence, n, expected_keys, expected_values",
[
([], 1, [], []),
([0, 1, 2], 1, [(0,), (1,), (2,)], [1, 1, 1]),
([0, 1, 2], 2, [(0, 1), (1, 2)], [1, 1]),
([0, 1, 2], 3... |
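The parametrization above implies that `ngrams` returns a `Counter` keyed by n-gram tuples. A hedged sketch of such a helper (illustrative, not necessarily ignite's implementation) that satisfies the listed expectations:

```python
from collections import Counter

def ngrams_sketch(sequence, n):
    # Count contiguous n-grams as tuples; yields an empty Counter when len(sequence) < n.
    return Counter(tuple(sequence[i : i + n]) for i in range(len(sequence) - n + 1))

assert ngrams_sketch([0, 1, 2], 2) == Counter({(0, 1): 1, (1, 2): 1})
```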
import os
import warnings
from collections import Counter
import pytest
import torch
from nltk.translate.bleu_score import corpus_bleu, sentence_bleu, SmoothingFunction
import ignite.distributed as idist
from ignite.exceptions import NotComputableError
from ignite.metrics.nlp import Bleu
from . import CorpusForTest
... |
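For orientation, a hedged sketch of how `Bleu` is typically exercised in tests like this one (the token sequences are illustrative; `update` takes a batch of candidates paired with a batch of reference lists):

```python
import pytest
from ignite.metrics.nlp import Bleu

def _sketch_bleu_exact_match():
    candidate = "the cat is on the mat".split()
    references = [["the cat is on the mat".split(), "there is a cat on the mat".split()]]

    bleu = Bleu(ngram=4)
    bleu.update(([candidate], references))
    # The candidate equals the first reference, so all n-gram precisions and the brevity penalty are 1.
    assert float(bleu.compute()) == pytest.approx(1.0)
```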
__all__ = ["CorpusForTest"]
class CorpusForTest:
    def __init__(self, lower_split=False):
        def preproc(text):
            if lower_split:
                return text.lower().split()
            else:
                return text

        # BLEU Paper examples
        self.cand_1 = preproc("the the the the the... |
import os
import nltk
import pytest
import rouge as pyrouge
import torch
import ignite.distributed as idist
from ignite.exceptions import NotComputableError
from ignite.metrics.nlp import Rouge
from ignite.metrics.nlp.rouge import compute_ngram_scores, RougeL, RougeN
from . import CorpusForTest
nltk.download("punkt... |
import argparse
import torch
import ignite.distributed as idist
def training(local_rank, config, **kwargs):
    import time

    time.sleep(idist.get_rank() * 0.1)
    print(idist.get_rank(), ": run with config:", config, "- kwargs:", kwargs, f"- backend={idist.backend()}")
    t = torch.tensor([idist.get_rank()]... |
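A hedged sketch of how such a `training` function is typically launched through `idist.Parallel` (the backend and process count are illustrative; the real script parses them from `argparse`):

```python
import ignite.distributed as idist

def training(local_rank, config, **kwargs):
    print(idist.get_rank(), ": run with config:", config, "- kwargs:", kwargs)

if __name__ == "__main__":
    config = {"lr": 0.1}
    with idist.Parallel(backend="gloo", nproc_per_node=2) as parallel:
        parallel.run(training, config, extra="kwarg")
```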
import os
import pytest
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
from torch.utils.data.dataloader import _InfiniteConstantSampler
from torch.utils.data.dataset import Dataset, IterableDataset
from torch.utils.data.distributed import DistributedSampler
from ... |
import os
import subprocess
import sys
from pathlib import Path
import pytest
import torch
from packaging.version import Version
import ignite.distributed as idist
from ignite.distributed.utils import has_hvd_support, has_native_dist_support, has_xla_support
def test_parallel_wrong_inputs():
with pytest.raises(... |
import os
import pytest
import torch
import ignite.distributed as idist
from ignite.distributed.utils import has_hvd_support
from tests.ignite.distributed.utils import (
    _test_distrib__get_max_length,
    _test_distrib_all_gather,
    _test_distrib_all_gather_group,
    _test_distrib_all_reduce,
    _test_distrib... |
import torch
import ignite.distributed as idist
from tests.ignite.distributed.utils import (
    _sanity_check,
    _test_distrib__get_max_length,
    _test_distrib_all_gather,
    _test_distrib_all_reduce,
    _test_distrib_barrier,
    _test_distrib_broadcast,
    _test_distrib_new_group,
    _test_sync,
)
def tes... |
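These `_test_distrib_*` helpers share one shape: build a per-rank tensor, apply a collective, assert on the result. A hedged, serial-safe sketch of that pattern (values are illustrative):

```python
import torch
import ignite.distributed as idist

def _sketch_all_reduce_pattern():
    ws = idist.get_world_size()
    t = torch.tensor([idist.get_rank() + 1.0])
    t = idist.all_reduce(t)  # element-wise sum across all participating processes
    assert t.item() == sum(r + 1.0 for r in range(ws))  # also holds for ws == 1 (serial mode)
```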
import pytest
import torch
import torch.distributed as dist
import ignite.distributed as idist
from ignite.distributed.utils import sync
from ignite.engine import Engine, Events
def _sanity_check():
    from ignite.distributed.utils import _model

    assert _model.get_world_size() == _model.get_nnodes() * _model.ge... |
import os
import pytest
import ignite.distributed as idist
from ignite.distributed.utils import has_xla_support
from tests.ignite.distributed.utils import (
    _test_distrib_all_gather,
    _test_distrib_all_gather_group,
    _test_distrib_all_reduce,
    _test_distrib_all_reduce_group,
    _test_distrib_barrier,
    ... |
import os
import pytest
import torch
import torch.distributed as dist
from packaging.version import Version
import ignite.distributed as idist
from ignite.distributed.utils import has_native_dist_support
from tests.ignite.distributed.utils import (
_test_distrib__get_max_length,
_test_distrib_all_gather,
... |
import pytest
import torch
from ignite.distributed.comp_models import has_hvd_support
if not has_hvd_support:
pytest.skip("Skip if no Horovod package", allow_module_level=True)
else:
import horovod.torch as hvd
from ignite.distributed.comp_models.horovod import _HorovodDistModel
@pytest.mark.distribute... |
import os
import pytest
import torch
from ignite.distributed.comp_models import has_xla_support
if not has_xla_support:
pytest.skip("Skip if no XLA support", allow_module_level=True)
else:
from ignite.distributed.comp_models.xla import _XlaDistModel
@pytest.mark.tpu
@pytest.mark.skipif(not has_xla_support,... |
import pytest
import torch
from ignite.distributed.comp_models.base import _SerialModel, ComputationModel
def test_serial_model():
    _SerialModel.create_from_backend()
    model = _SerialModel.create_from_context()

    assert model.get_local_rank() == 0
    assert model.get_rank() == 0
    assert model.get_world_... |
import os
import pytest
import torch
import torch.distributed as dist
from ignite.distributed.comp_models import has_native_dist_support
if not has_native_dist_support:
pytest.skip("Skip if no native dist support", allow_module_level=True)
else:
from ignite.distributed.comp_models.native import _expand_hostl... |
import random
from pathlib import Path
import pytest
@pytest.fixture
def no_site_packages(request):
    import sys

    modules = {}
    for k in sys.modules:
        if request.param in k:
            modules[k] = sys.modules[k]
    for k in modules:
        del sys.modules[k]
    prev_path = list(sys.path)
    sy... |
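The fixture above is truncated mid-statement. The usual shape of this pattern strips the matching modules and `site-packages` entries, yields, then restores both; a hedged sketch of that shape (not a verbatim copy of the original tail):

```python
import sys
import pytest

@pytest.fixture
def no_site_packages_sketch(request):
    # Hypothetical stand-in for the truncated fixture above.
    saved = {k: v for k, v in sys.modules.items() if request.param in k}
    for k in saved:
        del sys.modules[k]
    prev_path = list(sys.path)
    sys.path = [p for p in sys.path if "site-packages" not in p]
    yield "no_site_packages"
    sys.path = prev_path
    sys.modules.update(saved)
```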
# coding: utf-8
|
from unittest.mock import Mock, patch
import pytest
import torch
from ignite.contrib.metrics import GpuInfo
from ignite.engine import Engine, State
def test_no_pynvml_package():
with patch.dict("sys.modules", {"pynvml.smi": None}):
with pytest.raises(ModuleNotFoundError, match="This contrib module requi... |
import os
from unittest.mock import patch
import pytest
import sklearn
import torch
from sklearn.metrics import average_precision_score
import ignite.distributed as idist
from ignite.contrib.metrics import AveragePrecision
from ignite.engine import Engine
from ignite.exceptions import NotComputableError
torch.manual... |
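A hedged sketch of the usual cross-check such a test performs against scikit-learn (sizes and seed are illustrative):

```python
import pytest
import torch
from sklearn.metrics import average_precision_score
from ignite.contrib.metrics import AveragePrecision

def _sketch_average_precision_matches_sklearn():
    torch.manual_seed(12)
    y_pred = torch.rand(100)
    y_true = torch.randint(0, 2, size=(100,))

    ap = AveragePrecision()
    ap.update((y_pred, y_true))
    # The metric defers to scikit-learn internally, so the two values should agree.
    assert ap.compute() == pytest.approx(average_precision_score(y_true.numpy(), y_pred.numpy()))
```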
from unittest.mock import patch
import numpy as np
import pytest
import sklearn
import torch
from sklearn.metrics import roc_curve
from ignite import distributed as idist
from ignite.contrib.metrics.roc_auc import RocCurve
from ignite.engine import Engine
from ignite.exceptions import NotComputableError
from ignite.m... |
import os
from unittest.mock import patch
import pytest
import sklearn
import torch
from sklearn.metrics import roc_auc_score
import ignite.distributed as idist
from ignite.contrib.metrics import ROC_AUC
from ignite.engine import Engine
from ignite.exceptions import NotComputableError
from ignite.metrics.epoch_metric... |
import os
from unittest.mock import patch
import pytest
import sklearn
import torch
from sklearn.metrics import cohen_kappa_score
import ignite.distributed as idist
from ignite.contrib.metrics import CohenKappa
from ignite.engine import Engine
from ignite.exceptions import NotComputableError
torch.manual_seed(12)
... |
import os
from typing import Tuple
from unittest.mock import patch
import numpy as np
import pytest
import sklearn
import torch
from sklearn.metrics import precision_recall_curve
import ignite.distributed as idist
from ignite.contrib.metrics.precision_recall_curve import PrecisionRecallCurve
from ignite.engine import... |
import os
import numpy as np
import pytest
import torch
from sklearn.metrics import DistanceMetric
import ignite.distributed as idist
from ignite.contrib.metrics.regression import ManhattanDistance
from ignite.engine import Engine
def test_wrong_input_shapes():
    m = ManhattanDistance()
    with pytest.raises(Va... |
import os
import numpy as np
import pytest
import torch
import ignite.distributed as idist
from ignite.contrib.metrics.regression import MedianAbsolutePercentageError
from ignite.engine import Engine
from ignite.exceptions import NotComputableError
def test_zero_sample():
m = MedianAbsolutePercentageError()
... |
import os
import numpy as np
import pytest
import torch
from sklearn.metrics import DistanceMetric
import ignite.distributed as idist
from ignite.contrib.metrics.regression import CanberraMetric
from ignite.engine import Engine
def test_wrong_input_shapes():
    m = CanberraMetric()
    with pytest.raises(ValueErr... |
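A hedged sketch of the typical value check against scikit-learn's `DistanceMetric` (the arrays are illustrative; non-negative values keep both definitions of the Canberra distance identical):

```python
import numpy as np
import pytest
import torch
from sklearn.metrics import DistanceMetric
from ignite.contrib.metrics.regression import CanberraMetric

def _sketch_canberra_matches_sklearn():
    a = np.abs(np.random.randn(4))
    b = np.abs(np.random.randn(4))

    m = CanberraMetric()
    m.update((torch.from_numpy(a), torch.from_numpy(b)))

    expected = DistanceMetric.get_metric("canberra").pairwise([a, b])[0][1]
    assert m.compute() == pytest.approx(expected)
```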
import os
import numpy as np
import pytest
import torch
import ignite.distributed as idist
from ignite.contrib.metrics.regression import WaveHedgesDistance
from ignite.engine import Engine
def test_wrong_input_shapes():
    m = WaveHedgesDistance()
    with pytest.raises(ValueError, match=r"Input data shapes shoul... |
import os
import numpy as np
import pytest
import torch
import ignite.distributed as idist
from ignite.contrib.metrics.regression import GeometricMeanAbsoluteError
from ignite.engine import Engine
from ignite.exceptions import NotComputableError
def test_zero_sample():
m = GeometricMeanAbsoluteError()
with ... |
import os
import numpy as np
import pytest
import torch
import ignite.distributed as idist
from ignite.contrib.metrics.regression import MeanNormalizedBias
from ignite.engine import Engine
from ignite.exceptions import NotComputableError
def test_zero_sample():
m = MeanNormalizedBias()
with pytest.raises(
... |
import os
import numpy as np
import pytest
import torch
import ignite.distributed as idist
from ignite.contrib.metrics.regression import GeometricMeanRelativeAbsoluteError
from ignite.engine import Engine
from ignite.exceptions import NotComputableError
def test_zero_sample():
m = GeometricMeanRelativeAbsoluteE... |
import os
import numpy as np
import pytest
import torch
import ignite.distributed as idist
from ignite.contrib.metrics.regression import FractionalAbsoluteError
from ignite.engine import Engine
from ignite.exceptions import NotComputableError
def test_zero_sample():
m = FractionalAbsoluteError()
with pytest... |
import os
import numpy as np
import pytest
import torch
import ignite.distributed as idist
from ignite.contrib.metrics.regression import MedianAbsoluteError
from ignite.engine import Engine
from ignite.exceptions import NotComputableError
def test_zero_sample():
m = MedianAbsoluteError()
with pytest.raises(... |
import os
import numpy as np
import pytest
import torch
import ignite.distributed as idist
from ignite.contrib.metrics.regression import MedianRelativeAbsoluteError
from ignite.engine import Engine
from ignite.exceptions import NotComputableError
def test_zero_sample():
m = MedianRelativeAbsoluteError()
wit... |
from typing import Optional
import numpy as np
import pytest
import torch
import ignite.distributed as idist
from ignite.contrib.metrics.regression._base import _BaseRegression, _torch_median
def test_base_regression_shapes():
class L1(_BaseRegression):
def reset(self):
self._sum_of_errors... |
import os
import numpy as np
import pytest
import torch
import ignite.distributed as idist
from ignite.contrib.metrics.regression import FractionalBias
from ignite.engine import Engine
from ignite.exceptions import NotComputableError
def test_zero_sample():
m = FractionalBias()
with pytest.raises(
N... |
import os
import numpy as np
import pytest
import torch
import ignite.distributed as idist
from ignite.contrib.metrics.regression import MeanError
from ignite.engine import Engine
from ignite.exceptions import NotComputableError
def test_zero_sample():
m = MeanError()
with pytest.raises(NotComputableError, ... |
import os
import numpy as np
import pytest
import torch
from pytest import approx, raises
import ignite.distributed as idist
from ignite.contrib.metrics.regression import MeanAbsoluteRelativeError
from ignite.engine import Engine
from ignite.exceptions import NotComputableError
def test_wrong_input_shapes():
m ... |
import os
import numpy as np
import pytest
import torch
import ignite.distributed as idist
from ignite.contrib.metrics.regression import MaximumAbsoluteError
from ignite.engine import Engine
from ignite.exceptions import NotComputableError
def test_zero_sample():
m = MaximumAbsoluteError()
with pytest.raise... |
import os
import numpy as np
import pytest
import torch
from sklearn.metrics import r2_score
import ignite.distributed as idist
from ignite.contrib.metrics.regression import R2Score
from ignite.engine import Engine
from ignite.exceptions import NotComputableError
def test_zero_sample():
m = R2Score()
with p... |
import os
import sys
from unittest.mock import call, MagicMock
import pytest
import torch
import torch.nn as nn
from torch.utils.data.distributed import DistributedSampler
import ignite.contrib.handlers as handlers
import ignite.distributed as idist
from ignite.contrib.engines.common import (
_setup_logging,
... |
# coding: utf-8
import unittest.mock as mock
import pytest
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from ignite.contrib.engines import create_supervised_tbptt_trainer, Tbptt_Events
from ignite.contrib.engines.tbptt import _detach_hidden
def test_detach_hidden_R... |
from unittest.mock import Mock
import pytest
import torch
@pytest.fixture()
def norm_mock():
    def norm(x: torch.Tensor):
        return x.norm()

    norm_mock = Mock(side_effect=norm, spec=norm)
    norm_mock.configure_mock(**{"__name__": "norm"})
    norm_mock.reset_mock()
    return norm_mock
@pytest.fixture... |
import sys
from unittest.mock import call, MagicMock
import pytest
import torch
from ignite.contrib.handlers.mlflow_logger import (
global_step_from_engine,
MLflowLogger,
OptimizerParamsHandler,
OutputHandler,
)
from ignite.engine import Engine, Events, State
def test_output_handler_with_wrong_logge... |
from typing import Any, Union
from unittest.mock import call, MagicMock
import pytest
import torch
from ignite.contrib.handlers.base_logger import (
BaseLogger,
BaseOptimizerParamsHandler,
BaseOutputHandler,
BaseWeightsHandler,
BaseWeightsScalarHandler,
)
from ignite.engine import Engine, Events, ... |
import math
import os
from collections import defaultdict
from unittest.mock import ANY, call, MagicMock, patch
import clearml
import pytest
import torch
from clearml.binding.frameworks import WeightsFileHandler
from clearml.model import Framework
import ignite.distributed as idist
from ignite.contrib.handlers.clearm... |
class MockFP16DeepSpeedZeroOptimizer:
    def __init__(self, optimizer):
        self.optimizer = optimizer

    def step(self, closure=None):
        self.optimizer.step()

    def _get_param_groups(self):
        return self.optimizer.param_groups

    def _set_param_groups(self, value):
        self.optimizer.param_... |
import math
import warnings
from unittest.mock import MagicMock
import pytest
import torch
from ignite.contrib.handlers.neptune_logger import (
global_step_from_engine,
GradsScalarHandler,
NeptuneLogger,
NeptuneSaver,
OptimizerParamsHandler,
OutputHandler,
WeightsScalarHandler,
)
from igni... |
import sys
from unittest.mock import ANY, call, MagicMock, patch
import pytest
import torch
from ignite.contrib.handlers.visdom_logger import (
_DummyExecutor,
global_step_from_engine,
GradsScalarHandler,
OptimizerParamsHandler,
OutputHandler,
VisdomLogger,
WeightsScalarHandler,
)
from ign... |
from unittest.mock import call, MagicMock
import pytest
import torch
from ignite.contrib.handlers.wandb_logger import (
global_step_from_engine,
OptimizerParamsHandler,
OutputHandler,
WandBLogger,
)
from ignite.engine import Events, State
def test_optimizer_params_handler_wrong_setup():
with pyt... |
import os
from unittest.mock import call, MagicMock
import pytest
import torch
from ignite.contrib.handlers.polyaxon_logger import (
global_step_from_engine,
OptimizerParamsHandler,
OutputHandler,
PolyaxonLogger,
)
from ignite.engine import Engine, Events, State
os.environ["POLYAXON_NO_OP"] = "1"
d... |
# -*- coding: utf-8 -*-
import sys
import time
from argparse import Namespace
from unittest.mock import patch
import numpy as np
import pytest
import torch
from packaging.version import Version
from ignite.contrib.handlers import ProgressBar
from ignite.engine import Engine, Events
from ignite.handlers import Termina... |
import math
import os
from unittest.mock import ANY, call, MagicMock, patch
import pytest
import torch
from ignite.contrib.handlers.tensorboard_logger import (
global_step_from_engine,
GradsHistHandler,
GradsScalarHandler,
OptimizerParamsHandler,
OutputHandler,
TensorboardLogger,
WeightsHi... |
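A hedged sketch of the wiring these handler tests revolve around: attach an `OutputHandler` to an engine event and let the logger write scalars (log directory, tag, and data are illustrative):

```python
from ignite.contrib.handlers.tensorboard_logger import OutputHandler, TensorboardLogger
from ignite.engine import Engine, Events

def _sketch_tb_output_handler(log_dir):
    logger = TensorboardLogger(log_dir=log_dir)
    trainer = Engine(lambda engine, batch: batch)

    logger.attach(
        trainer,
        log_handler=OutputHandler(tag="training", output_transform=lambda loss: {"loss": loss}),
        event_name=Events.ITERATION_COMPLETED,
    )
    trainer.run([0.3, 0.2, 0.1], max_epochs=1)
    logger.close()
```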
import os
import random
import sys
from collections.abc import Mapping
from unittest.mock import patch
import numpy as np
import pytest
import torch
import torch.nn as nn
from torch.optim import SGD
from torch.utils.data import BatchSampler, DataLoader, RandomSampler
import ignite.distributed as idist
from ignite.eng... |
import os
import time
from unittest.mock import call, MagicMock, Mock
import numpy as np
import pytest
import torch
import ignite.distributed as idist
from ignite.engine import Engine, Events, State
from ignite.engine.deterministic import keep_random_state
from ignite.metrics import Average
from tests.ignite.engine i... |
from collections.abc import Mapping
import pytest
import torch
from ignite.engine import Engine, Events, State
from tests.ignite.engine import BatchChecker, EpochCounter, IterationCounter
def test_state_dict():
    engine = Engine(lambda e, b: 1)
    sd = engine.state_dict()
    assert isinstance(sd, Mapping) and l... |
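A hedged sketch of the save/restore round trip that these state-dict tests are built around (field values are illustrative):

```python
from ignite.engine import Engine

def _sketch_state_dict_roundtrip():
    engine = Engine(lambda e, b: 1)
    engine.state.iteration = 10
    engine.state.epoch_length = 5
    engine.state.max_epochs = 2

    sd = engine.state_dict()  # a Mapping with "iteration", "epoch_length" and "max_epochs"

    restored = Engine(lambda e, b: 1)
    restored.load_state_dict(sd)
    assert restored.state.iteration == 10 and restored.state.max_epochs == 2
```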
import torch
try:
    from torch.utils.data import IterableDataset
except ImportError:

    class IterableDataset:
        pass

class BatchChecker:
    def __init__(self, data, init_counter=0):
        self.counter = init_counter
        self.data = data
        self.true_batch = None

    def check(self, batch):
        ... |
from enum import Enum
from unittest.mock import MagicMock
import pytest
import torch
import ignite.distributed as idist
from ignite.engine import Engine, Events
from ignite.engine.events import CallableEventWithFilter, EventEnum, EventsList
def test_custom_events():
    class CustomEvents(EventEnum):
        TEST_E... |
import os
from importlib.util import find_spec
from typing import Optional, Union
from unittest import mock
from unittest.mock import MagicMock, patch
import pytest
import torch
from packaging.version import Version
from pytest import approx
from torch.nn.functional import mse_loss
from torch.optim import SGD
import ... |
import functools
import gc
from unittest.mock import call, create_autospec, MagicMock
import pytest
from pytest import raises
from ignite.engine import Engine, Events, State
from ignite.engine.events import EventsList
class DummyEngine(Engine):
    def __init__(self):
        super(DummyEngine, self).__init__(lambd... |
import sys
import time
import pytest
from ignite.engine import Engine, Events
from ignite.handlers import Timer
if sys.platform.startswith("darwin"):
pytest.skip("Skip if on MacOS", allow_module_level=True)
def test_timer():
    sleep_t = 0.2
    n_iter = 3

    def _train_func(engine, batch):
        time.sle... |
import pytest
import torch
@pytest.fixture()
def dummy_model_factory():
    class DummyModel(torch.nn.Module):
        def __init__(self):
            super(DummyModel, self).__init__()
            self.fc1 = torch.nn.Linear(10, 10)
            self.fc2 = torch.nn.Linear(12, 12)
            self.fc1.weight.data.zero_... |
import os
import pytest
import torch
import ignite.distributed as idist
from ignite.engine import Engine, Events
from ignite.handlers import EarlyStopping
def do_nothing_update_fn(engine, batch):
pass
def test_args_validation():
    trainer = Engine(do_nothing_update_fn)

    with pytest.raises(ValueError, ma... |
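A hedged sketch of the core behaviour such a test goes on to check: once the score stops improving for `patience` consecutive calls, the handler terminates the trainer (score values are illustrative):

```python
from ignite.engine import Engine
from ignite.handlers import EarlyStopping

def _sketch_early_stopping_terminates():
    scores = iter([1.0, 0.8, 0.79])  # no improvement after the first score

    trainer = Engine(lambda e, b: None)
    handler = EarlyStopping(patience=2, score_function=lambda _: next(scores), trainer=trainer)

    for _ in range(3):
        handler(None)  # the engine argument is only forwarded to score_function here
    assert trainer.should_terminate
```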
from unittest.mock import MagicMock
from ignite.engine import Engine, Events
from ignite.handlers import global_step_from_engine
def test_global_step_from_engine():
    iteration = 12
    epoch = 23

    trainer = Engine(lambda e, b: None)
    trainer.state.iteration = iteration
    trainer.state.epoch = epoch
... |
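A hedged sketch of how the truncated assertion typically continues: the callable returned by `global_step_from_engine` ignores the engine it is called with and reads the step from the wrapped trainer's state:

```python
from unittest.mock import MagicMock
from ignite.engine import Engine, Events
from ignite.handlers import global_step_from_engine

def _sketch_global_step_from_engine():
    trainer = Engine(lambda e, b: None)
    trainer.state.epoch = 23

    gst = global_step_from_engine(trainer)
    # The first argument (the logged engine) is ignored; the step comes from `trainer`.
    assert gst(MagicMock(), Events.EPOCH_COMPLETED) == 23
```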
import os
from typing import Any, Callable, Union
import pytest
import torch
import torch.nn as nn
from torch.nn.parallel import DataParallel, DistributedDataParallel
import ignite.distributed as idist
from ignite.engine import Engine, Events
from ignite.handlers import EMAHandler
def _get_dummy_model() -> nn.Modul... |
import re
from pathlib import Path
from unittest.mock import patch
import pytest
import torch
import torch.nn as nn
from packaging.version import Version
from ignite.engine import Engine, Events
from ignite.handlers.state_param_scheduler import (
ExpStateScheduler,
LambdaStateScheduler,
MultiStepStateSche... |
import time
import pytest
from ignite.engine import Engine, Events
from ignite.handlers import TimeLimit
def test_arg_validation():
with pytest.raises(ValueError, match=r"Argument limit_sec should be a positive integer."):
TimeLimit(limit_sec=-5)
with pytest.raises(TypeError, match=r"Argument limit... |
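A hedged sketch of the behaviour these tests target: attached to an engine event, the handler terminates the run once the wall-clock limit is exceeded (timings are illustrative):

```python
import time
from ignite.engine import Engine, Events
from ignite.handlers import TimeLimit

def _sketch_time_limit_stops_training():
    trainer = Engine(lambda engine, batch: time.sleep(0.3))
    trainer.add_event_handler(Events.ITERATION_COMPLETED, TimeLimit(limit_sec=1))

    trainer.run(range(20), max_epochs=1)
    assert trainer.state.iteration < 20  # terminated once the limit was exceeded
```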
# Needed to collect coverage data
|
from unittest.mock import MagicMock, patch
import numpy as np
import pytest
import torch
from torch.optim.lr_scheduler import CosineAnnealingWarmRestarts, ExponentialLR, StepLR
from ignite.engine import Engine, Events
from ignite.handlers.param_scheduler import (
ConcatScheduler,
CosineAnnealingScheduler,
... |
import sys
import time
from unittest.mock import patch
import pytest
from pytest import approx
from ignite.engine import Engine, EventEnum, Events
from ignite.handlers.time_profilers import BasicTimeProfiler, HandlersTimeProfiler
if sys.platform.startswith("darwin"):
pytest.skip("Skip if on MacOS", allow_module_... |
import copy
import os
from pathlib import Path
from unittest.mock import MagicMock
import matplotlib
import pytest
import torch
import torch.nn.functional as F
from torch import nn
from torch.optim import SGD
import ignite.distributed as idist
from ignite.contrib.handlers import FastaiLRFinder
from ignite.engine impo... |
import pytest
from ignite.engine.engine import Engine, Events
from ignite.handlers import EpochOutputStore
@pytest.fixture
def dummy_evaluator():
    def dummy_process_function(engine, batch):
        return 1, 0

    dummy_evaluator = Engine(dummy_process_function)
    return dummy_evaluator
@pytest.fixture
def ... |
import numpy as np
import pytest
import torch
from ignite.engine import Engine, Events, State
from ignite.handlers import TerminateOnNan
@pytest.mark.parametrize(
"state_output,should_terminate",
[
(1.0, False),
(torch.tensor(123.45), False),
(torch.asin(torch.tensor([1.0, 2.0, 0.0, 3... |
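A hedged sketch of how these parametrized outputs are typically fed to the handler, calling it directly on a fabricated `engine.state.output` rather than through a full run:

```python
import torch
from ignite.engine import Engine
from ignite.handlers import TerminateOnNan

def _sketch_terminate_on_nan(state_output=torch.tensor(float("nan")), should_terminate=True):
    trainer = Engine(lambda engine, batch: None)
    trainer.state.output = state_output
    TerminateOnNan()(trainer)  # inspects engine.state.output and terminates on NaN/inf
    assert trainer.should_terminate == should_terminate
```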
import os
import stat
import warnings
from collections import OrderedDict
from collections.abc import Mapping
from pathlib import Path
from unittest.mock import MagicMock
import pytest
import torch
import torch.nn as nn
from packaging.version import Version
import ignite.distributed as idist
from ignite.engine import... |
import pytest
from ignite.base import Serializable
def test_state_dict():
    s = Serializable()
    with pytest.raises(NotImplementedError):
        s.state_dict()

def test_load_state_dict():
    s = Serializable()
    s.load_state_dict({})
|
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/stable/config
# -- Path setup ------------------------------------------------------------... |
"""
MNIST example with training and validation monitoring using Neptune.
Requirements:
Neptune: `pip install neptune`
Usage:
Run the example:
```bash
python mnist_with_neptune_logger.py
```
Go to https://neptune.ai and explore your run.
Note:
You can view example runs here:
https... |
"""
MNIST example with training and validation monitoring using TensorboardX and Tensorboard.
Requirements:
Optionally TensorboardX (https://github.com/lanpa/tensorboard-pytorch): `pip install tensorboardX`
Tensorboard: `pip install tensorflow` (or just install tensorboard without the rest of tensorflow)
U... |
"""
MNIST example with training and validation monitoring using ClearML.
Requirements:
ClearML: `pip install clearml`
Usage:
Run the example:
```bash
python mnist_with_clearml_logger.py
```
"""
from argparse import ArgumentParser
import torch
import torch.nn.functional as F
from torch import ... |
"""
MNIST example with training and validation monitoring using Tensorboard on TPU
Requirements:
- PyTorch >= 1.5
- PyTorch XLA >= 1.5
- Tensorboard: `pip install tensorflow` (or just install tensorboard without the rest of tensorflow)
Usage:
Start tensorboard:
```bash
tensorboard --logdir=/t... |
from argparse import ArgumentParser
from pathlib import Path
import torch
import torch.nn.functional as F
from torch import nn
from torch.optim import SGD
from torch.optim.lr_scheduler import StepLR
from torch.utils.data import DataLoader
from torchvision.datasets import MNIST
from torchvision.transforms import Compos... |
from argparse import ArgumentParser
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
from torch.optim import SGD
from torch.utils.data import DataLoader
from torchvision.datasets import MNIST
from torchvision.transforms import Compose, Normalize, ToTensor
from ignite.engine import ... |
"""
MNIST example with training and validation monitoring using Weights & Biases
Requirements:
Weights & Biases: `pip install wandb`
Usage:
Make sure you are logged into Weights & Biases (use the `wandb` command).
Run the example:
```bash
python mnist_with_wandb_logger.py
```
Go to h... |
from argparse import ArgumentParser
import torch
import torch.nn.functional as F
from torch import nn
from torch.optim import SGD
from torch.utils.data import DataLoader
from torchvision.datasets import MNIST
from torchvision.transforms import Compose, Normalize, ToTensor
from ignite.contrib.handlers import ProgressB... |
"""
MNIST example with training and validation monitoring using Visdom.
Requirements:
Visdom (https://github.com/facebookresearch/visdom.git):
`pip install git+https://github.com/facebookresearch/visdom.git`
Usage:
Start visdom server:
```bash
visdom -logging_level 30
```
Run the exam... |
"""
MNIST example with training and validation monitoring using Tensorboard.
Requirements:
TensorboardX (https://github.com/lanpa/tensorboard-pytorch): `pip install tensorboardX`
or PyTorch >= 1.2 which supports Tensorboard
Tensorboard: `pip install tensorflow` (or just install tensorboard without the res... |
from argparse import ArgumentParser
import torch
import torch.nn.functional as F
from torch import nn
from torch.optim import SGD
from torch.utils.data import DataLoader
from torchvision.datasets import MNIST
from torchvision.transforms import Compose, Normalize, ToTensor
from tqdm import tqdm
from ignite.engine impo... |
import argparse
import os
import random
import warnings
from pathlib import Path
import torch
import torch.nn as nn
import torch.optim as optim
import torch.utils.data as data
from ignite.contrib.handlers import ProgressBar
from ignite.engine import Engine, Events
from ignite.handlers import ModelCheckpoint, Timer
fr... |
import fire
import torch
from torch.cuda.amp import autocast, GradScaler
from torch.nn import CrossEntropyLoss
from torch.optim import SGD
from torchvision.models import wide_resnet50_2
from utils import get_train_eval_loaders
from ignite.contrib.handlers import ProgressBar
from ignite.engine import convert_tensor, cr... |
import fire
import torch
from apex import amp
from torch.nn import CrossEntropyLoss
from torch.optim import SGD
from torchvision.models import wide_resnet50_2
from utils import get_train_eval_loaders
from ignite.contrib.handlers import ProgressBar
from ignite.engine import convert_tensor, create_supervised_evaluator, ... |
import random
from torch.utils.data import DataLoader, Subset
from torchvision.datasets.cifar import CIFAR100
from torchvision.transforms import Compose, Normalize, Pad, RandomCrop, RandomErasing, RandomHorizontalFlip, ToTensor
def get_train_eval_loaders(path, batch_size=256):
"""Setup the dataflow:
- lo... |
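The docstring of `get_train_eval_loaders` is cut off above. A hedged sketch of what such a dataflow helper typically does for the CIFAR100 benchmark (normalization constants and worker counts are illustrative):

```python
import random

from torch.utils.data import DataLoader, Subset
from torchvision.datasets.cifar import CIFAR100
from torchvision.transforms import Compose, Normalize, Pad, RandomCrop, RandomHorizontalFlip, ToTensor

def get_train_eval_loaders_sketch(path, batch_size=256):
    # Hypothetical stand-in for the truncated helper above.
    norm = Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
    train_transform = Compose([Pad(4), RandomCrop(32), RandomHorizontalFlip(), ToTensor(), norm])
    test_transform = Compose([ToTensor(), norm])

    train_dataset = CIFAR100(root=path, train=True, transform=train_transform, download=True)
    test_dataset = CIFAR100(root=path, train=False, transform=test_transform, download=True)
    eval_train_dataset = Subset(train_dataset, random.sample(range(len(train_dataset)), len(test_dataset)))

    train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=4)
    test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False, num_workers=4)
    eval_train_loader = DataLoader(eval_train_dataset, batch_size=batch_size, shuffle=False, num_workers=4)
    return train_loader, test_loader, eval_train_loader
```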