python_code stringlengths 0 229k |
|---|
import os
import numpy as np
import pytest
import torch
import ignite.distributed as idist
from ignite.contrib.metrics.regression import GeometricMeanRelativeAbsoluteError
from ignite.engine import Engine
from ignite.exceptions import NotComputableError
def test_zero_sample():
m = GeometricMeanRelativeAbsoluteE... |
import os
import numpy as np
import pytest
import torch
import ignite.distributed as idist
from ignite.contrib.metrics.regression import FractionalAbsoluteError
from ignite.engine import Engine
from ignite.exceptions import NotComputableError
def test_zero_sample():
m = FractionalAbsoluteError()
with pytest... |
import os
import numpy as np
import pytest
import torch
import ignite.distributed as idist
from ignite.contrib.metrics.regression import MedianAbsoluteError
from ignite.engine import Engine
from ignite.exceptions import NotComputableError
def test_zero_sample():
m = MedianAbsoluteError()
with pytest.raises(... |
import os
import numpy as np
import pytest
import torch
import ignite.distributed as idist
from ignite.contrib.metrics.regression import MedianRelativeAbsoluteError
from ignite.engine import Engine
from ignite.exceptions import NotComputableError
def test_zero_sample():
m = MedianRelativeAbsoluteError()
wit... |
from typing import Optional
import numpy as np
import pytest
import torch
import ignite.distributed as idist
from ignite.contrib.metrics.regression._base import _BaseRegression, _torch_median
def test_base_regression_shapes():
class L1(_BaseRegression):
def reset(self):
self._sum_of_errors... |
import os
import numpy as np
import pytest
import torch
import ignite.distributed as idist
from ignite.contrib.metrics.regression import FractionalBias
from ignite.engine import Engine
from ignite.exceptions import NotComputableError
def test_zero_sample():
m = FractionalBias()
with pytest.raises(
N... |
import os
import numpy as np
import pytest
import torch
import ignite.distributed as idist
from ignite.contrib.metrics.regression import MeanError
from ignite.engine import Engine
from ignite.exceptions import NotComputableError
def test_zero_sample():
m = MeanError()
with pytest.raises(NotComputableError, ... |
import os
import numpy as np
import pytest
import torch
from pytest import approx, raises
import ignite.distributed as idist
from ignite.contrib.metrics.regression import MeanAbsoluteRelativeError
from ignite.engine import Engine
from ignite.exceptions import NotComputableError
def test_wrong_input_shapes():
m ... |
import os
import numpy as np
import pytest
import torch
import ignite.distributed as idist
from ignite.contrib.metrics.regression import MaximumAbsoluteError
from ignite.engine import Engine
from ignite.exceptions import NotComputableError
def test_zero_sample():
m = MaximumAbsoluteError()
with pytest.raise... |
import os
import numpy as np
import pytest
import torch
from sklearn.metrics import r2_score
import ignite.distributed as idist
from ignite.contrib.metrics.regression import R2Score
from ignite.engine import Engine
from ignite.exceptions import NotComputableError
def test_zero_sample():
m = R2Score()
with p... |
import os
import sys
from unittest.mock import call, MagicMock
import pytest
import torch
import torch.nn as nn
from torch.utils.data.distributed import DistributedSampler
import ignite.contrib.handlers as handlers
import ignite.distributed as idist
from ignite.contrib.engines.common import (
_setup_logging,
... |
# coding: utf-8
import unittest.mock as mock
import pytest
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from ignite.contrib.engines import create_supervised_tbptt_trainer, Tbptt_Events
from ignite.contrib.engines.tbptt import _detach_hidden
def test_detach_hidden_R... |
from unittest.mock import Mock
import pytest
import torch
@pytest.fixture()
def norm_mock():
    """Provide a call-tracking ``Mock`` wrapping a tensor-norm function.

    The mock delegates real calls to ``torch.Tensor.norm`` via ``side_effect``,
    is spec'd against the same function, and carries ``__name__ == "norm"`` so
    code that introspects the callable's name keeps working. Call history is
    cleared before the mock is handed to the test.
    """
    def norm(x: torch.Tensor):
        return x.norm()

    mocked = Mock(side_effect=norm, spec=norm)
    mocked.configure_mock(**{"__name__": "norm"})
    mocked.reset_mock()
    return mocked
@pytest.fixture... |
import sys
from unittest.mock import call, MagicMock
import pytest
import torch
from ignite.contrib.handlers.mlflow_logger import (
global_step_from_engine,
MLflowLogger,
OptimizerParamsHandler,
OutputHandler,
)
from ignite.engine import Engine, Events, State
def test_output_handler_with_wrong_logge... |
from typing import Any, Union
from unittest.mock import call, MagicMock
import pytest
import torch
from ignite.contrib.handlers.base_logger import (
BaseLogger,
BaseOptimizerParamsHandler,
BaseOutputHandler,
BaseWeightsHandler,
BaseWeightsScalarHandler,
)
from ignite.engine import Engine, Events, ... |
import math
import os
from collections import defaultdict
from unittest.mock import ANY, call, MagicMock, patch
import clearml
import pytest
import torch
from clearml.binding.frameworks import WeightsFileHandler
from clearml.model import Framework
import ignite.distributed as idist
from ignite.contrib.handlers.clearm... |
class MockFP16DeepSpeedZeroOptimizer:
    def __init__(self, optimizer):
        # Wrap the real optimizer; every operation on this mock delegates to it.
        self.optimizer = optimizer
    def step(self, closure=None):
        # Delegate stepping to the wrapped optimizer. `closure` is accepted
        # only for API compatibility with torch optimizers; it is not forwarded.
        self.optimizer.step()
    def _get_param_groups(self):
        # Expose the wrapped optimizer's param_groups (read side of a
        # property defined on this mock class).
        return self.optimizer.param_groups
def _set_param_groups(self, value):
self.optimizer.param_... |
import math
import warnings
from unittest.mock import MagicMock
import pytest
import torch
from ignite.contrib.handlers.neptune_logger import (
global_step_from_engine,
GradsScalarHandler,
NeptuneLogger,
NeptuneSaver,
OptimizerParamsHandler,
OutputHandler,
WeightsScalarHandler,
)
from igni... |
import sys
from unittest.mock import ANY, call, MagicMock, patch
import pytest
import torch
from ignite.contrib.handlers.visdom_logger import (
_DummyExecutor,
global_step_from_engine,
GradsScalarHandler,
OptimizerParamsHandler,
OutputHandler,
VisdomLogger,
WeightsScalarHandler,
)
from ign... |
from unittest.mock import call, MagicMock
import pytest
import torch
from ignite.contrib.handlers.wandb_logger import (
global_step_from_engine,
OptimizerParamsHandler,
OutputHandler,
WandBLogger,
)
from ignite.engine import Events, State
def test_optimizer_params_handler_wrong_setup():
with pyt... |
import os
from unittest.mock import call, MagicMock
import pytest
import torch
from ignite.contrib.handlers.polyaxon_logger import (
global_step_from_engine,
OptimizerParamsHandler,
OutputHandler,
PolyaxonLogger,
)
from ignite.engine import Engine, Events, State
os.environ["POLYAXON_NO_OP"] = "1"
d... |
# -*- coding: utf-8 -*-
import sys
import time
from argparse import Namespace
from unittest.mock import patch
import numpy as np
import pytest
import torch
from packaging.version import Version
from ignite.contrib.handlers import ProgressBar
from ignite.engine import Engine, Events
from ignite.handlers import Termina... |
import math
import os
from unittest.mock import ANY, call, MagicMock, patch
import pytest
import torch
from ignite.contrib.handlers.tensorboard_logger import (
global_step_from_engine,
GradsHistHandler,
GradsScalarHandler,
OptimizerParamsHandler,
OutputHandler,
TensorboardLogger,
WeightsHi... |
import os
import random
import sys
from collections.abc import Mapping
from unittest.mock import patch
import numpy as np
import pytest
import torch
import torch.nn as nn
from torch.optim import SGD
from torch.utils.data import BatchSampler, DataLoader, RandomSampler
import ignite.distributed as idist
from ignite.eng... |
import os
import time
from unittest.mock import call, MagicMock, Mock
import numpy as np
import pytest
import torch
import ignite.distributed as idist
from ignite.engine import Engine, Events, State
from ignite.engine.deterministic import keep_random_state
from ignite.metrics import Average
from tests.ignite.engine i... |
from collections.abc import Mapping
import pytest
import torch
from ignite.engine import Engine, Events, State
from tests.ignite.engine import BatchChecker, EpochCounter, IterationCounter
def test_state_dict():
engine = Engine(lambda e, b: 1)
sd = engine.state_dict()
assert isinstance(sd, Mapping) and l... |
import torch
try:
    from torch.utils.data import IterableDataset
except ImportError:
    # Fallback stub for older torch versions that lack IterableDataset —
    # presumably so isinstance checks elsewhere still work; confirm against callers.
    class IterableDataset:
        pass
class BatchChecker:
def __init__(self, data, init_counter=0):
self.counter = init_counter
self.data = data
self.true_batch = None
def check(self, batch):
... |
from enum import Enum
from unittest.mock import MagicMock
import pytest
import torch
import ignite.distributed as idist
from ignite.engine import Engine, Events
from ignite.engine.events import CallableEventWithFilter, EventEnum, EventsList
def test_custom_events():
class CustomEvents(EventEnum):
TEST_E... |
import os
from importlib.util import find_spec
from typing import Optional, Union
from unittest import mock
from unittest.mock import MagicMock, patch
import pytest
import torch
from packaging.version import Version
from pytest import approx
from torch.nn.functional import mse_loss
from torch.optim import SGD
import ... |
import functools
import gc
from unittest.mock import call, create_autospec, MagicMock
import pytest
from pytest import raises
from ignite.engine import Engine, Events, State
from ignite.engine.events import EventsList
class DummyEngine(Engine):
def __init__(self):
super(DummyEngine, self).__init__(lambd... |
import sys
import time
import pytest
from ignite.engine import Engine, Events
from ignite.handlers import Timer
if sys.platform.startswith("darwin"):
pytest.skip("Skip if on MacOS", allow_module_level=True)
def test_timer():
sleep_t = 0.2
n_iter = 3
def _train_func(engine, batch):
time.sle... |
import pytest
import torch
@pytest.fixture()
def dummy_model_factory():
class DummyModel(torch.nn.Module):
def __init__(self):
super(DummyModel, self).__init__()
self.fc1 = torch.nn.Linear(10, 10)
self.fc2 = torch.nn.Linear(12, 12)
self.fc1.weight.data.zero_... |
import os
import pytest
import torch
import ignite.distributed as idist
from ignite.engine import Engine, Events
from ignite.handlers import EarlyStopping
def do_nothing_update_fn(engine, batch):
    """No-op process function: ignore the engine and batch, produce no output."""
    return None
def test_args_validation():
trainer = Engine(do_nothing_update_fn)
with pytest.raises(ValueError, ma... |
from unittest.mock import MagicMock
from ignite.engine import Engine, Events
from ignite.handlers import global_step_from_engine
def test_global_step_from_engine():
iteration = 12
epoch = 23
trainer = Engine(lambda e, b: None)
trainer.state.iteration = iteration
trainer.state.epoch = epoch
... |
import os
from typing import Any, Callable, Union
import pytest
import torch
import torch.nn as nn
from torch.nn.parallel import DataParallel, DistributedDataParallel
import ignite.distributed as idist
from ignite.engine import Engine, Events
from ignite.handlers import EMAHandler
def _get_dummy_model() -> nn.Modul... |
import re
from pathlib import Path
from unittest.mock import patch
import pytest
import torch
import torch.nn as nn
from packaging.version import Version
from ignite.engine import Engine, Events
from ignite.handlers.state_param_scheduler import (
ExpStateScheduler,
LambdaStateScheduler,
MultiStepStateSche... |
import time
import pytest
from ignite.engine import Engine, Events
from ignite.handlers import TimeLimit
def test_arg_validation():
with pytest.raises(ValueError, match=r"Argument limit_sec should be a positive integer."):
TimeLimit(limit_sec=-5)
with pytest.raises(TypeError, match=r"Argument limit... |
# Needed to collect coverage data
|
from unittest.mock import MagicMock, patch
import numpy as np
import pytest
import torch
from torch.optim.lr_scheduler import CosineAnnealingWarmRestarts, ExponentialLR, StepLR
from ignite.engine import Engine, Events
from ignite.handlers.param_scheduler import (
ConcatScheduler,
CosineAnnealingScheduler,
... |
import sys
import time
from unittest.mock import patch
import pytest
from pytest import approx
from ignite.engine import Engine, EventEnum, Events
from ignite.handlers.time_profilers import BasicTimeProfiler, HandlersTimeProfiler
if sys.platform.startswith("darwin"):
pytest.skip("Skip if on MacOS", allow_module_... |
import copy
import os
from pathlib import Path
from unittest.mock import MagicMock
import matplotlib
import pytest
import torch
import torch.nn.functional as F
from torch import nn
from torch.optim import SGD
import ignite.distributed as idist
from ignite.contrib.handlers import FastaiLRFinder
from ignite.engine impo... |
import pytest
from ignite.engine.engine import Engine, Events
from ignite.handlers import EpochOutputStore
@pytest.fixture
def dummy_evaluator():
    """Provide an Engine whose process function always yields the pair (1, 0)."""
    def _process(engine, batch):
        return 1, 0

    return Engine(_process)
@pytest.fixture
def ... |
import numpy as np
import pytest
import torch
from ignite.engine import Engine, Events, State
from ignite.handlers import TerminateOnNan
@pytest.mark.parametrize(
"state_output,should_terminate",
[
(1.0, False),
(torch.tensor(123.45), False),
(torch.asin(torch.tensor([1.0, 2.0, 0.0, 3... |
import os
import stat
import warnings
from collections import OrderedDict
from collections.abc import Mapping
from pathlib import Path
from unittest.mock import MagicMock
import pytest
import torch
import torch.nn as nn
from packaging.version import Version
import ignite.distributed as idist
from ignite.engine import... |
import pytest
from ignite.base import Serializable
def test_state_dict():
    """Serializable.state_dict is abstract: invoking it must raise NotImplementedError."""
    serializable = Serializable()
    with pytest.raises(NotImplementedError):
        serializable.state_dict()
def test_load_state_dict():
    """Serializable.load_state_dict accepts an empty mapping without raising."""
    serializable = Serializable()
    serializable.load_state_dict({})
|
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/stable/config
# -- Path setup ------------------------------------------------------------... |
"""
MNIST example with training and validation monitoring using Neptune.
Requirements:
Neptune: `pip install neptune`
Usage:
Run the example:
```bash
python mnist_with_neptune_logger.py
```
Go to https://neptune.ai and explore your run.
Note:
You can view example runs here:
https... |
"""
MNIST example with training and validation monitoring using TensorboardX and Tensorboard.
Requirements:
Optionally TensorboardX (https://github.com/lanpa/tensorboard-pytorch): `pip install tensorboardX`
Tensorboard: `pip install tensorflow` (or just install tensorboard without the rest of tensorflow)
U... |
"""
MNIST example with training and validation monitoring using ClearML.
Requirements:
ClearML: `pip install clearml`
Usage:
Run the example:
```bash
python mnist_with_clearml_logger.py
```
"""
from argparse import ArgumentParser
import torch
import torch.nn.functional as F
from torch import ... |
"""
MNIST example with training and validation monitoring using Tensorboard on TPU
Requirements:
- PyTorch >= 1.5
- PyTorch XLA >= 1.5
- Tensorboard: `pip install tensorflow` (or just install tensorboard without the rest of tensorflow)
Usage:
Start tensorboard:
```bash
tensorboard --logdir=/t... |
from argparse import ArgumentParser
from pathlib import Path
import torch
import torch.nn.functional as F
from torch import nn
from torch.optim import SGD
from torch.optim.lr_scheduler import StepLR
from torch.utils.data import DataLoader
from torchvision.datasets import MNIST
from torchvision.transforms import Compos... |
from argparse import ArgumentParser
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
from torch.optim import SGD
from torch.utils.data import DataLoader
from torchvision.datasets import MNIST
from torchvision.transforms import Compose, Normalize, ToTensor
from ignite.engine import ... |
"""
MNIST example with training and validation monitoring using Weights & Biases
Requirements:
Weights & Biases: `pip install wandb`
Usage:
Make sure you are logged into Weights & Biases (use the `wandb` command).
Run the example:
```bash
python mnist_with_wandb_logger.py
```
Go to h... |
from argparse import ArgumentParser
import torch
import torch.nn.functional as F
from torch import nn
from torch.optim import SGD
from torch.utils.data import DataLoader
from torchvision.datasets import MNIST
from torchvision.transforms import Compose, Normalize, ToTensor
from ignite.contrib.handlers import ProgressB... |
"""
MNIST example with training and validation monitoring using Visdom.
Requirements:
Visdom (https://github.com/facebookresearch/visdom.git):
`pip install git+https://github.com/facebookresearch/visdom.git`
Usage:
Start visdom server:
```bash
visdom -logging_level 30
```
Run the exam... |
"""
MNIST example with training and validation monitoring using Tensorboard.
Requirements:
TensorboardX (https://github.com/lanpa/tensorboard-pytorch): `pip install tensorboardX`
or PyTorch >= 1.2 which supports Tensorboard
Tensorboard: `pip install tensorflow` (or just install tensorboard without the res... |
from argparse import ArgumentParser
import torch
import torch.nn.functional as F
from torch import nn
from torch.optim import SGD
from torch.utils.data import DataLoader
from torchvision.datasets import MNIST
from torchvision.transforms import Compose, Normalize, ToTensor
from tqdm import tqdm
from ignite.engine impo... |
import argparse
import os
import random
import warnings
from pathlib import Path
import torch
import torch.nn as nn
import torch.optim as optim
import torch.utils.data as data
from ignite.contrib.handlers import ProgressBar
from ignite.engine import Engine, Events
from ignite.handlers import ModelCheckpoint, Timer
fr... |
import fire
import torch
from torch.cuda.amp import autocast, GradScaler
from torch.nn import CrossEntropyLoss
from torch.optim import SGD
from torchvision.models import wide_resnet50_2
from utils import get_train_eval_loaders
from ignite.contrib.handlers import ProgressBar
from ignite.engine import convert_tensor, cr... |
import fire
import torch
from apex import amp
from torch.nn import CrossEntropyLoss
from torch.optim import SGD
from torchvision.models import wide_resnet50_2
from utils import get_train_eval_loaders
from ignite.contrib.handlers import ProgressBar
from ignite.engine import convert_tensor, create_supervised_evaluator, ... |
import random
from torch.utils.data import DataLoader, Subset
from torchvision.datasets.cifar import CIFAR100
from torchvision.transforms import Compose, Normalize, Pad, RandomCrop, RandomErasing, RandomHorizontalFlip, ToTensor
def get_train_eval_loaders(path, batch_size=256):
"""Setup the dataflow:
- lo... |
import fire
import torch
from torch.nn import CrossEntropyLoss
from torch.optim import SGD
from torchvision.models import wide_resnet50_2
from utils import get_train_eval_loaders
from ignite.contrib.handlers import ProgressBar
from ignite.engine import convert_tensor, create_supervised_evaluator, Engine, Events
from i... |
import os
from pathlib import Path
import brevitas.nn as qnn
import torch
import torch.nn as nn
from pact import PACTReLU
from torchvision import datasets, models
from torchvision.transforms import Compose, Normalize, Pad, RandomCrop, RandomHorizontalFlip, ToTensor
train_transform = Compose(
[
Pad(4),
... |
from datetime import datetime
from pathlib import Path
import fire
import torch
import torch.nn as nn
import torch.optim as optim
import utils
from torch.cuda.amp import autocast, GradScaler
import ignite
import ignite.distributed as idist
from ignite.contrib.engines import common
from ignite.contrib.handlers import ... |
# Implementation taken from https://discuss.pytorch.org/t/evaluator-returns-nan/107972/3
# Ref: https://arxiv.org/abs/1805.06085
import torch
import torch.nn as nn
class PACTClip(torch.autograd.Function):
@staticmethod
def forward(ctx, x, alpha):
ctx.save_for_backward(x, alpha)
return torch.c... |
import torch.nn as nn
import torch.nn.init as init
class Net(nn.Module):
def __init__(self, upscale_factor):
super(Net, self).__init__()
self.relu = nn.ReLU()
self.conv1 = nn.Conv2d(1, 64, (5, 5), (1, 1), (2, 2))
self.conv2 = nn.Conv2d(64, 64, (3, 3), (1, 1), (1, 1))
self.... |
import argparse
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
from model import Net
from torch.utils.data import DataLoader
from torchvision.transforms.functional import center_crop, resize, to_tensor
from ignite.contrib.handlers import ProgressBar
from ignite.engine import Engine... |
import argparse
import numpy as np
import torch
from PIL import Image
from torchvision.transforms.functional import to_tensor
# Training settings
parser = argparse.ArgumentParser(description="PyTorch Super Res Example")
parser.add_argument("--input_image", type=str, required=True, help="input image to use")
parser.ad... |
from typing import Callable, Optional
import numpy as np
import torch
try:
from image_dataset_viz import render_datapoint
except ImportError:
raise ModuleNotFoundError(
"Please install image-dataset-viz via pip install --upgrade git+https://github.com/vfdev-5/ImageDatasetViz.git"
)
def tensor_to... |
import torch
import ignite
import ignite.distributed as idist
from ignite.handlers import DiskSaver
def initialize(config):
device = idist.device()
model = config.model.to(device)
optimizer = config.optimizer
# Adapt model to dist config
model = idist.auto_model(model)
optimizer = idist.aut... |
from pathlib import Path
from typing import Callable, Optional, Tuple
import cv2
import torch
from torch.utils.data import DataLoader
from torch.utils.data.dataset import Subset
from torchvision.datasets import ImageFolder
import ignite.distributed as idist
from ignite.utils import convert_tensor
def opencv_loader... |
import os
from functools import partial
from pathlib import Path
import fire
import torch
try:
from torch.cuda.amp import autocast, GradScaler
except ImportError:
raise RuntimeError("Please, use recent PyTorch version, e.g. >=1.6.0")
import dataflow as data
import utils
import vis
from py_config_runner impor... |
# Basic training configuration
import os
from functools import partial
import albumentations as A
import torch.nn as nn
import torch.optim as optim
import torch.optim.lr_scheduler as lrs
from albumentations.pytorch import ToTensorV2 as ToTensor
from dataflow import denormalize, get_train_val_loaders
from torchvision.m... |
# Basic training configuration
import os
from functools import partial
import albumentations as A
import torch.nn as nn
import torch.optim as optim
import torch.optim.lr_scheduler as lrs
from albumentations.pytorch import ToTensorV2 as ToTensor
from dataflow import denormalize, get_train_val_loaders
from torchvision.m... |
import numpy as np
import torch
from PIL import Image
try:
from image_dataset_viz import render_datapoint
except ImportError:
raise ModuleNotFoundError(
"Please install image-dataset-viz via pip install --upgrade git+https://github.com/vfdev-5/ImageDatasetViz.git"
)
def _getvocpallete(num_cls):
... |
import torch
import ignite
import ignite.distributed as idist
from ignite.handlers import DiskSaver
def initialize(config):
device = idist.device()
model = config.model.to(device)
optimizer = config.optimizer
# Adapt model to dist config
model = idist.auto_model(model)
optimizer = idist.aut... |
import cv2
import numpy as np
import torch
from PIL import Image
from torch.utils.data import Dataset
from torch.utils.data.dataset import Subset
from torchvision.datasets.sbd import SBDataset
from torchvision.datasets.voc import VOCSegmentation
import ignite.distributed as idist
from ignite.utils import convert_tenso... |
import os
from functools import partial
from pathlib import Path
import fire
import torch
try:
from torch.cuda.amp import autocast, GradScaler
except ImportError:
raise RuntimeError("Please, use recent PyTorch version, e.g. >=1.6.0")
import dataflow as data
import utils
import vis
from py_config_runner impor... |
# Basic training configuration
import os
from functools import partial
import albumentations as A
import cv2
import torch.nn as nn
import torch.optim as optim
import torch.optim.lr_scheduler as lrs
from albumentations.pytorch import ToTensorV2 as ToTensor
from dataflow import get_train_val_loaders, ignore_mask_boundar... |
# Basic training configuration
import os
from functools import partial
import albumentations as A
import cv2
import torch.nn as nn
import torch.optim as optim
import torch.optim.lr_scheduler as lrs
from albumentations.pytorch import ToTensorV2 as ToTensor
from dataflow import get_train_val_loaders, ignore_mask_boundar... |
# Basic training configuration
import os
import albumentations as A
import cv2
from albumentations.pytorch import ToTensorV2 as ToTensor
from dataflow import get_inference_dataloader, ignore_mask_boundaries
from torchvision.models.segmentation import deeplabv3_resnet101
# ##############################
# Global confi... |
import argparse
from collections import deque, namedtuple
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.distributions import Categorical
from ignite.engine import Engine, Events
try:
import gymnasium as gym
except ImportError:
rai... |
import argparse
from collections import deque
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.distributions import Categorical
from ignite.engine import Engine, Events
try:
import gymnasium as gym
except ImportError:
raise ModuleNot... |
import torch
class TransformerNet(torch.nn.Module):
def __init__(self):
super(TransformerNet, self).__init__()
# Initial convolution layers
self.conv1 = ConvLayer(3, 32, kernel_size=9, stride=1)
self.in1 = torch.nn.InstanceNorm2d(32, affine=True)
self.conv2 = ConvLayer(32, ... |
from collections import namedtuple
import torch
from torchvision import models
from torchvision.models.vgg import VGG16_Weights
class Vgg16(torch.nn.Module):
def __init__(self, requires_grad=False):
super(Vgg16, self).__init__()
vgg_pretrained_features = models.vgg16(weights=VGG16_Weights.IMAGENE... |
import sys
class Progbar(object):
def __init__(self, loader, metrics):
self.num_iterations = len(loader)
self.output_stream = sys.stdout
self.metrics = metrics
self.alpha = 0.98
def _calc_running_avg(self, engine):
for k, v in engine.state.output.items():
o... |
# coding: utf-8
import argparse
import random
from collections import OrderedDict
from pathlib import Path
import numpy as np
import torch
import utils
from handlers import Progbar
from torch.optim import Adam
from torch.utils.data import DataLoader
from torchvision import datasets, transforms
from transformer_net imp... |
from PIL import Image
def load_image(filename, size=None, scale=None):
    """Load an image from disk, optionally resizing it.

    Args:
        filename: path to the image file.
        size: if given, resize to a (size, size) square with LANCZOS resampling.
        scale: if given (and size is not), shrink both dimensions by this factor.

    Returns:
        The (possibly resized) PIL image.
    """
    img = Image.open(filename)
    if size is not None:
        img = img.resize((size, size), Image.LANCZOS)
    elif scale is not None:
        width, height = img.size
        img = img.resize((int(width / scale), int(height / scale)), Image.LANCZOS)
    return img
... |
import torch.nn as nn
from transformers import AutoConfig, AutoModelForSequenceClassification
class TransformerModel(nn.Module):
def __init__(self, model_name, model_dir, dropout, n_fc, n_classes):
super(TransformerModel, self).__init__()
self.config = AutoConfig.from_pretrained(
model... |
import torch
class TransformerDataset(torch.utils.data.Dataset):
def __init__(self, texts, labels, tokenizer, max_length):
self.texts = texts
self.labels = labels
self.tokenizer = tokenizer
self.max_length = max_length
def __getitem__(self, idx):
text = str(self.texts[... |
import torch
from dataset import TransformerDataset
from datasets import load_dataset
from model import TransformerModel
from transformers import AutoTokenizer
from ignite.handlers import DiskSaver
def get_tokenizer(tokenizer_name, tokenizer_dir):
tokenizer = AutoTokenizer.from_pretrained(tokenizer_name, cache_d... |
import os
from datetime import datetime
from pathlib import Path
import fire
import torch
import torch.nn as nn
import torch.optim as optim
import utils
from torch.cuda.amp import autocast, GradScaler
import ignite
import ignite.distributed as idist
from ignite.contrib.engines import common
from ignite.contrib.handle... |
import argparse
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
from torch.optim.lr_scheduler import StepLR
from torch.utils.data import DataLoader, Dataset
from torchvision import datasets
from ignite.contrib.handlers import ProgressBar
from ignite.engine import E... |
import os
from pathlib import Path
from torchvision import datasets, models
from torchvision.transforms import Compose, Normalize, Pad, RandomCrop, RandomHorizontalFlip, ToTensor
train_transform = Compose(
[
Pad(4),
RandomCrop(32, fill=128),
RandomHorizontalFlip(),
ToTensor(),
... |
from datetime import datetime
from pathlib import Path
from typing import Any, Optional
import fire
import torch
import torch.nn as nn
import torch.optim as optim
import utils
from torch.cuda.amp import autocast, GradScaler
import ignite
import ignite.distributed as idist
from ignite.contrib.engines import common
fro... |
import os
from pathlib import Path
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim.lr_scheduler import StepLR
from torchvision import datasets, models
from torchvision.transforms import Compose, Normalize, Pad, RandomCrop, RandomHorizontalFlip, ToTensor
import ignite.distributed as idi... |
import torch
import torchvision
from torch.utils.mobile_optimizer import optimize_for_mobile
model = torchvision.models.mobilenet_v2(pretrained=True)
model.eval()
example = torch.rand(1, 3, 224, 224)
traced_script_module = torch.jit.trace(model, example)
torchscript_model_optimized = optimize_for_mobile(traced_script_... |
from typing import Dict, List, Optional, Tuple
import json
import math
from fairseq.data import Dictionary
import torch
import torchaudio
from torchaudio.pipelines import EMFORMER_RNNT_BASE_LIBRISPEECH
from torchaudio.models import Hypothesis
def get_hypo_tokens(hypo: Hypothesis) -> List[int]:
    """Return the token ids stored in the first field of an RNNT hypothesis."""
    token_ids = hypo[0]
    return token_ids
d... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.