| python_code (string, lengths 0–258k) |
|---|
from torchbenchmark.util.framework.timm.model_factory import TimmModel
from torchbenchmark.tasks import COMPUTER_VISION
class Model(TimmModel):
task = COMPUTER_VISION.CLASSIFICATION
DEFAULT_TRAIN_BSIZE = 32
DEFAULT_EVAL_BSIZE = 64
def __init__(self, test, device, jit=False, batch_size=None, extra_arg... |
"""
Maskrcnn model from torchvision
"""
import torch
import os
import itertools
import random
import numpy as np
from ...util.model import BenchmarkModel
from torchbenchmark.tasks import COMPUTER_VISION
from pathlib import Path
from typing import Tuple
# Model specific imports
import torchvision
from .coco_utils impo... |
import os
import sys
import subprocess
from pathlib import Path
def setup_data_dir():
current_dir = Path(os.path.dirname(os.path.realpath(__file__)))
coco2017_data_dir = os.path.join(current_dir.parent.parent, "data", ".data", "coco2017-minimal")
assert os.path.exists(coco2017_data_dir), "Couldn't find coc... |
import torch
from pycocotools import mask as coco_mask
from torchvision.transforms import functional as F
def convert_coco_poly_to_mask(segmentations, height, width):
masks = []
for polygons in segmentations:
rles = coco_mask.frPyObjects(polygons, height, width)
mask = coco_mask.decode(rles)
... |
import os
import logging
import torch
from pathlib import Path
from contextlib import suppress
# TorchBench imports
from torchbenchmark.util.model import BenchmarkModel
from torchbenchmark.tasks import COMPUTER_VISION
# effdet imports
from effdet import create_model, create_loader
from effdet.data import resolve_inpu... |
from effdet.data import resolve_input_config, SkipSubset
from effdet import create_loader, create_dataset, create_evaluator
from effdet.anchors import Anchors, AnchorLabeler
from effdet.data.dataset_config import CocoCfg
from dataclasses import dataclass, field
from typing import Dict
@dataclass
class Coco2017Minima... |
import torch
from collections import OrderedDict
from contextlib import suppress
from timm.utils import AverageMeter, reduce_tensor
def train_epoch(
epoch, model, loader, optimizer, args,
lr_scheduler=None, saver=None, output_dir='', amp_autocast=suppress, loss_scaler=None, model_ema=None,
num_... |
import yaml
import argparse
from timm.utils import add_bool_arg
def get_args(config_file=None):
def _parse_args():
if config_file:
with open(config_file, 'r') as f:
cfg = yaml.safe_load(f)
parser.set_defaults(**cfg)
# There may be remaining unrecognized ... |
import os
import sys
import patch
from pathlib import Path
import subprocess
def check_data_dir():
current_dir = Path(os.path.dirname(os.path.realpath(__file__)))
coco2017_data_dir = os.path.join(current_dir.parent.parent, "data", ".data", "coco2017-minimal")
assert os.path.exists(coco2017_data_dir), "Coul... |
from torchbenchmark.util.framework.timm.model_factory import TimmModel
from torchbenchmark.tasks import COMPUTER_VISION
class Model(TimmModel):
task = COMPUTER_VISION.GENERATION
DEFAULT_TRAIN_BSIZE = 32
DEFAULT_EVAL_BSIZE = 32
def __init__(self, test, device, jit=False, batch_size=None, extra_args=[]... |
from torchbenchmark.tasks import NLP
from torchbenchmark.util.framework.huggingface.model_factory import HuggingFaceModel
class Model(HuggingFaceModel):
task = NLP.LANGUAGE_MODELING
DEFAULT_TRAIN_BSIZE = 4
DEFAULT_EVAL_BSIZE = 1
def __init__(self, test, device, jit=False, batch_size=None, extra_args=[... |
import subprocess
import sys
import os
from torchbenchmark.util.framework.huggingface.patch_hf import patch_transformers, cache_model
def pip_install_requirements():
    """Install this benchmark's dependencies from requirements.txt via a quiet pip run."""
    args = [sys.executable, '-m', 'pip', 'install', '-q', '-r', 'requirements.txt']
    subprocess.check_call(args)
if __name__ == '__main__':
pip_install_requirem... |
import numpy as np
import random
import time
import torch
from argparse import Namespace
from .meta import Meta
from pathlib import Path
from typing import Tuple
from ...util.model import BenchmarkModel
from torchbenchmark.tasks import OTHER
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = Fa... |
import torch
from torch import nn
from torch import optim
from torch.nn import functional as F
from torch.utils.data import TensorDataset, DataLoader
from torch import optim
import numpy as np
from .learner import Learner
from copy import deepcopy
class Meta(nn.Module):
"""
Meta Learn... |
import torch
from torch import nn
from torch.nn import functional as F
import numpy as np
from typing import List
class Learner(nn.Module):
"""
"""
def __init__(self, config, imgc, imgsz):
"""
:param config: network config file, type:list of (string, list)
:param imgc: ... |
from torchbenchmark.tasks import NLP
from torchbenchmark.util.framework.huggingface.model_factory import HuggingFaceModel
class Model(HuggingFaceModel):
task = NLP.LANGUAGE_MODELING
DEFAULT_TRAIN_BSIZE = 4
DEFAULT_EVAL_BSIZE = 1
def __init__(self, test, device, jit=False, batch_size=None, extra_args=[... |
import subprocess
import sys
import os
from torchbenchmark.util.framework.huggingface.patch_hf import patch_transformers, cache_model
def pip_install_requirements():
    """Run pip (quietly) against requirements.txt to install the model's dependencies."""
    command = [sys.executable, '-m', 'pip', 'install', '-q', '-r', 'requirements.txt']
    subprocess.check_call(command)
if __name__ == '__main__':
pip_install_requirem... |
import torch
from . import tke_pytorch
from typing import Tuple
from torchbenchmark.tasks import OTHER
from ...util.model import BenchmarkModel
def _generate_inputs(size):
import numpy as np
import math
np.random.seed(17)
shape = (
math.ceil(2 * size ** (1 / 3)),
math.ceil(2 * size *... |
import torch
def solve_tridiag(a, b, c, d):
"""
Solves a tridiagonal matrix system with diagonals a, b, c and RHS vector d.
"""
assert a.shape == b.shape and a.shape == c.shape and a.shape == d.shape
n = a.shape[-1]
for i in range(1, n):
w = a[..., i] / b[..., i - 1]
b[..., i... |
# Script guard: this module only defines library code; direct execution is a deliberate no-op.
if __name__ == "__main__":
    pass
|
from torchbenchmark.tasks import NLP
from torchbenchmark.util.framework.huggingface.model_factory import HuggingFaceModel
class Model(HuggingFaceModel):
task = NLP.LANGUAGE_MODELING
# Original train batch size per device: 8
# Source: https://github.com/huggingface/transformers/blob/master/examples/flax/lan... |
import subprocess
import sys
import os
from torchbenchmark.util.framework.huggingface.patch_hf import patch_transformers, cache_model
def pip_install_requirements():
    """Install the packages listed in requirements.txt using the current interpreter's pip."""
    pip_cmd = [sys.executable, '-m', 'pip', 'install', '-q', '-r', 'requirements.txt']
    subprocess.check_call(pip_cmd)
if __name__ == '__main__':
pip_install_requirem... |
# Copyright (c) 2017 NVIDIA Corporation
import argparse
from math import sqrt
parser = argparse.ArgumentParser(description='RMSE_calculator')
parser.add_argument('--path_to_predictions', type=str, default="", metavar='N',
help='Path file with actual ratings and predictions')
parser.add_argument('-... |
# Benchmark created from NVidia DeepRecommender github project:
# https://github.com/NVIDIA/DeepRecommender
# a32a8a5c23092c551616acf6fac5b32e1155d18b
# Test supports eval and train modes for cpu and cuda targets.
#
# Both nvtrain.py and nvinfer.py support all original command
# line parameters but tensorflow depen... |
# Copyright (c) 2017 NVIDIA Corporation
# parameters to run benchmark on cpu
# --path_to_train_data Netflix/N1W_TRAIN --path_to_eval_data Netflix/N1W_TEST --hidden_layers 512,512,1024 --non_linearity_type selu --save_path model_save/model.epoch_0 --drop_prob 0.8 --predictions_path preds.txt --nooutput --forcecpu
# pa... |
# Copyright (c) 2017 NVIDIA Corporation
# to run against cuda:
# --gpu_ids 0 --path_to_train_data Netflix/N1W_TRAIN --path_to_eval_data Netflix/N1W_VALID --hidden_layers 512,512,1024 --non_linearity_type selu --batch_size 128 --logdir model_save --drop_prob 0.8 --optimizer momentum --lr 0.005 --weight_decay 0 --aug_st... |
import subprocess
import sys
def pip_install_requirements():
    """Quietly install every package pinned in requirements.txt."""
    invocation = [sys.executable, '-m', 'pip', 'install', '-q', '-r', 'requirements.txt']
    subprocess.check_call(invocation)

if __name__ == '__main__':
    # Allow this installer to be run directly as a script.
    pip_install_requirements()
|
# Copyright (c) 2017 NVIDIA Corporation
from os import listdir, path, makedirs
import random
import sys
import time
import datetime
def print_stats(data):
total_ratings = 0
print("STATS")
for user in data:
total_ratings += len(data[user])
print("Total Ratings: {}".format(total_ratings))
print("Total User... |
# Copyright (c) 2017 NVIDIA Corporation
import sys
import datetime
import random
from math import floor
def print_stats(data):
total_ratings = 0
print("STATS")
for user in data:
total_ratings += len(data[user])
print("Total Ratings: {}".format(total_ratings))
print("Total User count: {}".format(len(data.... |
# Copyright (c) 2017 NVIDIA Corporation
|
# Copyright (c) 2017 NVIDIA Corporation
|
# Copyright (c) 2017 NVIDIA Corporation
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as weight_init
from torch.autograd import Variable
def activation(input, kind):
#print("Activation: {}".format(kind))
if kind == 'selu':
return F.selu(input)
elif kind == 'relu':
... |
# Copyright (c) 2017 NVIDIA Corporation
|
# Copyright (c) 2017 NVIDIA Corporation
"""Data Layer Classes"""
from os import listdir, path
from random import shuffle
import torch
class UserItemRecDataProvider:
def __init__(self, params, user_id_map=None, item_id_map=None):
self._params = params
self._data_dir = self.params['data_dir']
self._extensi... |
import os
from torchbenchmark.tasks import COMPUTER_VISION
from torchbenchmark.util.framework.detectron2.model_factory import Detectron2Model
MODEL_NAME = os.path.basename(os.path.dirname(os.path.abspath(__file__)))
MODEL_DIR = os.path.abspath(os.path.dirname(__file__))
class Model(Detectron2Model):
task = COMPUT... |
import os
from torchbenchmark.util.framework.detectron2 import install_detectron2
MODEL_NAME = os.path.basename(os.path.dirname(os.path.abspath(__file__)))
MODEL_DIR = os.path.abspath(os.path.dirname(__file__))
if __name__ == '__main__':
install_detectron2(MODEL_NAME, MODEL_DIR)
|
import torch
import torch.optim as optim
import torch.nn as nn
import torch.utils.data as data
import torchvision.models as models
from opacus import PrivacyEngine
from opacus.validators.module_validator import ModuleValidator
from typing import Tuple
from ...util.model import BenchmarkModel
from torchbenchmark.tasks ... |
import subprocess
import sys
def pip_install_requirements():
    """Install the benchmark requirements with pip in quiet mode."""
    cmd = [sys.executable, '-m', 'pip', 'install', '-q', '-r', 'requirements.txt']
    subprocess.check_call(cmd)

if __name__ == '__main__':
    # Entry point: install dependencies when executed directly.
    pip_install_requirements()
|
from torchbenchmark.util.framework.vision.model_factory import TorchVisionModel
from torchbenchmark.tasks import COMPUTER_VISION
import torch.optim as optim
import torch
import torchvision.models as models
class Model(TorchVisionModel):
task = COMPUTER_VISION.CLASSIFICATION
# Original train batch size: 512, o... |
import argparse
import random
from collections import deque
import math
import gym
import numpy as np
class ActionRepeatWrapper(gym.Wrapper):
def __init__(self, env, repeat_multiplier=8):
super().__init__(env)
self.action_space = gym.spaces.Box(
-1.0, 1.0, shape=(1 + self.env.action_s... |
import dataclasses
@dataclasses.dataclass
class SACConfig:
env_id = "Pendulum-v1"
seed = 123
num_steps = 1
transitions_per_step = 1
max_episode_steps = 10
batch_size = 512
tau = 0.005
actor_lr = 1e-4
critic_lr = 1e-4
gamma = 0.99
init_alpha = 0.1
alpha_lr = 1e-4
buff... |
import torch
import os
import copy
import pickle
import math
from itertools import chain
from ...util.model import BenchmarkModel
from torchbenchmark.tasks import REINFORCEMENT_LEARNING
from typing import Tuple
from .config import SACConfig
from .envs import load_gym
from .sac import SACAgent
from .replay import Prio... |
import argparse
import copy
import math
import os
from itertools import chain
import numpy as np
import tensorboardX
import torch
import torch.nn.functional as F
import tqdm
from . import envs, nets, replay, utils
class SACAgent:
def __init__(
self,
obs_space_size,
act_space_size,
... |
import numpy as np
import torch
def unique(sorted_array):
"""
More efficient implementation of np.unique for sorted arrays
:param sorted_array: (np.ndarray)
:return:(np.ndarray) sorted_array without duplicate elements
"""
if len(sorted_array) == 1:
return sorted_array
left = sorted... |
import math
import os
import random
from collections import namedtuple
import gym
import numpy as np
import torch
def clean_hparams_dict(hparams_dict):
    """Return a copy of *hparams_dict* with all falsy values (None, 0, '', empty containers) dropped."""
    cleaned = {}
    for name, value in hparams_dict.items():
        if value:
            cleaned[name] = value
    return cleaned
def get_grad_norm(model):
total_norm = 0.0
for p in model.parameters():
try:
... |
import os
import subprocess
import sys
def pip_install_requirements():
    """Install this model's pip requirements without verbose output."""
    install_args = [sys.executable, '-m', 'pip', 'install', '-q', '-r', 'requirements.txt']
    subprocess.check_call(install_args)

if __name__ == '__main__':
    # Run the installer when invoked as a script.
    pip_install_requirements()
|
import math
import numpy as np
import torch
import torch.nn.functional as F
from torch import distributions as pyd
from torch import nn
from . import utils
def weight_init(m):
if isinstance(m, nn.Linear):
nn.init.orthogonal_(m.weight.data)
m.bias.data.fill_(0.0)
elif isinstance(m, nn.Conv2d)... |
from torchbenchmark.util.framework.vision.model_factory import TorchVisionModel
from torchbenchmark.tasks import COMPUTER_VISION
import torchvision.models as models
class Model(TorchVisionModel):
task = COMPUTER_VISION.CLASSIFICATION
# Train batch size: use the training batch in paper.
# Source: https://ar... |
# Ported from pytorch example:
# https://github.com/pytorch/examples/blob/master/dcgan/main.py
from __future__ import print_function
import argparse
import os
import random
from typing import Any, Tuple
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.opt... |
import subprocess
import sys
def pip_install_requirements():
    """Use the running interpreter's pip to install requirements.txt quietly."""
    argv = [sys.executable, '-m', 'pip', 'install', '-q', '-r', 'requirements.txt']
    subprocess.check_call(argv)

if __name__ == '__main__':
    # Direct invocation installs the dependencies.
    pip_install_requirements()
|
import os
from torchbenchmark.tasks import COMPUTER_VISION
from torchbenchmark.util.framework.detectron2.model_factory import Detectron2Model
MODEL_NAME = os.path.basename(os.path.dirname(__file__))
MODEL_DIR = os.path.abspath(os.path.dirname(__file__))
class Model(Detectron2Model):
task = COMPUTER_VISION.DETECTI... |
import os
from torchbenchmark.util.framework.detectron2 import install_detectron2
MODEL_NAME = os.path.basename(os.path.dirname(os.path.abspath(__file__)))
MODEL_DIR = os.path.abspath(os.path.dirname(__file__))
if __name__ == '__main__':
install_detectron2(MODEL_NAME, MODEL_DIR)
|
import os
from torchbenchmark.tasks import COMPUTER_VISION
from torchbenchmark.util.framework.detectron2.model_factory import Detectron2Model
MODEL_NAME = os.path.basename(os.path.dirname(os.path.abspath(__file__)))
MODEL_DIR = os.path.abspath(os.path.dirname(__file__))
class Model(Detectron2Model):
task = COMPUT... |
import os
from torchbenchmark.util.framework.detectron2 import install_detectron2
MODEL_NAME = os.path.basename(os.path.dirname(os.path.abspath(__file__)))
MODEL_DIR = os.path.abspath(os.path.dirname(__file__))
if __name__ == '__main__':
install_detectron2(MODEL_NAME, MODEL_DIR)
|
from torchbenchmark.util.framework.vision.model_factory import TorchVisionModel
from torchbenchmark.tasks import COMPUTER_VISION
import torchvision.models as models
class Model(TorchVisionModel):
task = COMPUTER_VISION.CLASSIFICATION
DEFAULT_TRAIN_BSIZE = 128
DEFAULT_EVAL_BSIZE = 64
def __init__(self,... |
import torch
def get_drhodT(salt, temp, p):
    """Derivative of seawater density with respect to temperature (linear EOS).

    :param salt: salinity — unused in this linearized form; kept so the
        signature matches the sibling get_drhod* helpers
    :param temp: temperature (°C)
    :param p: pressure
    :return: d(rho)/dT for the linear equation of state
    """
    # Reference constants of the linearized equation of state.
    rho0 = 1024.0            # reference density
    z0 = 0.0                 # reference depth offset
    theta0 = 283.0 - 273.15  # reference temperature in °C
    grav = 9.81
    betaT = 1.67e-4
    betaTs = 1e-5
    gammas = 1.1e-8
    depth = -p - z0
    theta_anom = temp - theta0
    # Temperature sensitivity grows linearly with the temperature anomaly
    # and is modulated by a pressure (depth) correction term.
    sensitivity = betaTs * theta_anom + betaT * (1 - gammas * grav * depth * rho0)
    return -sensitivity * rho0
def get_drhodS(... |
import torch
from . import isoneutral_pytorch
from torchbenchmark.tasks import OTHER
from ...util.model import BenchmarkModel
from typing import Tuple
def _generate_inputs(size):
import math
import numpy as np
np.random.seed(17)
shape = (
math.ceil(2 * size ** (1 / 3)),
math.ceil(2 * ... |
# Script guard: nothing to do when run directly — this file is import-only.
if __name__ == "__main__":
    pass
|
"""
pytorch_struct model, Unsupervised CFG task
https://github.com/harvardnlp/pytorch-struct/blob/master/notebooks/Unsupervised_CFG.ipynb
"""
import os
import pytest
import torchtext
import numpy as np
import torch, random
import torch_struct
from torch_struct import SentCFG
from .networks.NeuralCFG import NeuralCFG
f... |
import subprocess
import sys
def pip_install_requirements():
    """Install the pinned dependencies from requirements.txt (quiet mode)."""
    pip_call = [sys.executable, '-m', 'pip', 'install', '-q', '-r', 'requirements.txt']
    subprocess.check_call(pip_call)

if __name__ == '__main__':
    # Script entry point: perform the installation.
    pip_install_requirements()
|
import torch
import torch.nn as nn
class Res(nn.Module):
def __init__(self, H):
super().__init__()
self.u1 = nn.Linear(H, H)
self.u2 = nn.Linear(H, H)
self.v1 = nn.Linear(H, H)
self.v2 = nn.Linear(H, H)
self.w = nn.Linear(H, H)
def forward(self, y):
y ... |
# This example was adapated from https://github.com/muhrin/milad
# It is licensed under the GLPv3 license. You can find a copy of it
# here: https://www.gnu.org/licenses/gpl-3.0.en.html .
import torch
import torch.optim as optim
import torch.nn as nn
import torch.nn.functional as F
from functorch import vmap, jacrev
f... |
import subprocess
import sys
def pip_install_requirements():
    """Quiet pip install of everything listed in requirements.txt."""
    full_cmd = [sys.executable, '-m', 'pip', 'install', '-q', '-r', 'requirements.txt']
    subprocess.check_call(full_cmd)

if __name__ == '__main__':
    # Installs dependencies when executed as a standalone script.
    pip_install_requirements()
|
from torchbenchmark.tasks import NLP
from torchbenchmark.util.framework.huggingface.model_factory import HuggingFaceModel
class Model(HuggingFaceModel):
task = NLP.LANGUAGE_MODELING
DEFAULT_TRAIN_BSIZE = 4
DEFAULT_EVAL_BSIZE = 1
def __init__(self, test, device, jit=False, batch_size=None, extra_args=[... |
import subprocess
import sys
import os
from torchbenchmark.util.framework.huggingface.patch_hf import patch_transformers, cache_model
def pip_install_requirements():
    """Install requirements.txt packages through the active interpreter's pip."""
    installer = [sys.executable, '-m', 'pip', 'install', '-q', '-r', 'requirements.txt']
    subprocess.check_call(installer)
if __name__ == '__main__':
pip_install_requirem... |
"""
fastNLP model (TorchBenchmark Version)
This model resembles the "BertEmedding Q&A" task in [fastNLP Tutorial](https://fastnlp.readthedocs.io/zh/latest/tutorials/extend_1_bert_embedding.html).
Input data simulates [CMRC2018 dataset](https://ymcui.com/cmrc2018/).
The program runs only for benchmark purposes and does... |
import subprocess
import os
import sys
import patch
def patch_fastnlp():
import fastNLP
current_dir = os.path.dirname(os.path.abspath(__file__))
patch_file = os.path.join(current_dir, "fastnlp.patch")
fastNLP_dir = os.path.dirname(fastNLP.__file__)
fastNLP_target_file = os.path.join(fastNLP_dir, "e... |
"""
Generator of a simulated CMRC2018 dataset.
Use random Chinese characters with the same length as the original dataset.
"""
import os
import pathlib
import json
import random
TRAIN_NUM_BATCH = 1
EVAL_NUM_BATCH = 1
CMRC2018_TRAIN_SPEC = {
# Original
# "data_size": 2403,
# Benchmark
"data_size": 6, #... |
import torch
from . import eos_pytorch
from torchbenchmark.tasks import OTHER
from ...util.model import BenchmarkModel
from typing import Tuple
def _generate_inputs(size):
import math
import numpy as np
np.random.seed(17)
shape = (
math.ceil(2 * size ** (1/3)),
math.ceil(2 * size ** (1... |
"""
==========================================================================
in-situ density, dynamic enthalpy and derivatives
from Absolute Salinity and Conservative
Temperature, using the computationally-efficient 48-term expression for
density in terms of SA, CT and p (IOC et al., 2010).
==================... |
# Script guard: intentionally a no-op; this module is meant to be imported, not run.
if __name__ == "__main__":
    pass
|
# Generated by gen_torchvision_benchmark.py
import torch
import torch.optim as optim
import torchvision.models as models
from torch.quantization import quantize_fx
from torchbenchmark.tasks import COMPUTER_VISION
from ...util.model import BenchmarkModel
from typing import Tuple
class Model(BenchmarkModel):
task =... |
import argparse
import numpy as np
import random
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import optim
from typing import Tuple
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
from .pytorch_unet.unet import UNet
from .pytorch_unet.utils.dice_score... |
import subprocess
import sys
def pip_install_requirements():
    """Install the vendored pytorch_unet submodule's requirements quietly via pip."""
    cmd_line = [sys.executable, '-m', 'pip', 'install', '-q', '-r', 'pytorch_unet/requirements.txt']
    subprocess.check_call(cmd_line)

if __name__ == '__main__':
    # Run the dependency installation when invoked directly.
    pip_install_requirements()
|
import argparse
import logging
import os
import numpy as np
import torch
import torch.nn.functional as F
from PIL import Image
from torchvision import transforms
from utils.data_loading import BasicDataset
from unet import UNet
from utils.utils import plot_img_and_mask
def predict_img(net,
full_img,
... |
import argparse
import logging
import sys
from pathlib import Path
import torch
import torch.nn as nn
import torch.nn.functional as F
import wandb
from torch import optim
from torch.utils.data import DataLoader, random_split
from tqdm import tqdm
from utils.data_loading import BasicDataset, CarvanaDataset
from utils.... |
import torch
import torch.nn.functional as F
from tqdm import tqdm
from utils.dice_score import multiclass_dice_coeff, dice_coeff
def evaluate(net, dataloader, device):
net.eval()
num_val_batches = len(dataloader)
dice_score = 0
# iterate over the validation set
for batch in tqdm(dataloader, tot... |
import torch
from unet import UNet as _UNet
def unet_carvana(pretrained=False):
"""
UNet model trained on the Carvana dataset ( https://www.kaggle.com/c/carvana-image-masking-challenge/data ).
Set the scale to 0.5 (50%) when predicting.
"""
net = _UNet(n_channels=3, n_classes=2, bilinear=True)
... |
import logging
from os import listdir
from os.path import splitext
from pathlib import Path
import numpy as np
import torch
from PIL import Image
from torch.utils.data import Dataset
class BasicDataset(Dataset):
def __init__(self, images_dir: str, masks_dir: str, scale: float = 1.0, mask_suffix: str = ''):
... |
import torch
from torch import Tensor
def dice_coeff(input: Tensor, target: Tensor, reduce_batch_first: bool = False, epsilon=1e-6):
# Average of Dice coefficient for all batches, or for a single mask
assert input.size() == target.size()
if input.dim() == 2 and reduce_batch_first:
raise ValueError... |
import matplotlib.pyplot as plt
def plot_img_and_mask(img, mask):
classes = mask.shape[0] if len(mask.shape) > 2 else 1
fig, ax = plt.subplots(1, classes + 1)
ax[0].set_title('Input image')
ax[0].imshow(img)
if classes > 1:
for i in range(classes):
ax[i + 1].set_title(f'Output ... |
from .unet_model import UNet
|
""" Parts of the U-Net model """
import torch
import torch.nn as nn
import torch.nn.functional as F
class DoubleConv(nn.Module):
"""(convolution => [BN] => ReLU) * 2"""
def __init__(self, in_channels, out_channels, mid_channels=None):
super().__init__()
if not mid_channels:
mid_c... |
""" Full assembly of the parts to form the complete network """
from .unet_parts import *
class UNet(nn.Module):
def __init__(self, n_channels, n_classes, bilinear=True):
super(UNet, self).__init__()
self.n_channels = n_channels
self.n_classes = n_classes
self.bilinear = bilinear
... |
from torchbenchmark.util.framework.vision.model_factory import TorchVisionModel
from torchbenchmark.tasks import COMPUTER_VISION
import torchvision.models as models
class Model(TorchVisionModel):
task = COMPUTER_VISION.CLASSIFICATION
DEFAULT_TRAIN_BSIZE = 32
DEFAULT_EVAL_BSIZE = 32
def __init__(self, ... |
from torchbenchmark.util.framework.vision.model_factory import TorchVisionModel
from torchbenchmark.tasks import COMPUTER_VISION
import torchvision.models as models
class Model(TorchVisionModel):
task = COMPUTER_VISION.CLASSIFICATION
DEFAULT_TRAIN_BSIZE = 16
DEFAULT_EVAL_BSIZE = 8
def __init__(self, t... |
import numpy as np
import torch
import torchvision
import cv2, pdb
def composite4(fg, bg, a):
    """Alpha-composite a foreground image over a background.

    :param fg: foreground image, H x W x C (converted to float32 internally)
    :param bg: background image, broadcast-compatible with fg
    :param a: alpha matte, H x W, values in [0, 255]
    :return: blended image as a uint8 ndarray
    """
    foreground = np.array(fg, np.float32)
    # Normalize the matte to [0, 1] and add a trailing channel axis so it
    # broadcasts across the color channels.
    weight = np.expand_dims(a / 255, axis=2)
    blended = weight * foreground + (1 - weight) * bg
    return blended.astype(np.uint8)
def compose_image_withshift(alpha_pred,fg_pred,bg,seg):
image_sh=torch.zero... |
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
#import matplotlib.pyplot as plt
import pdb
from torch.nn.modules.loss import _Loss
from torch.autograd import Function, Variable
#import scipy.io as sio
class alpha_loss(_Loss):
def __init__(self):
super(alpha_loss,self).__init_... |
import os
from io import BytesIO
import tarfile
import tempfile
from six.moves import urllib
import numpy as np
from PIL import Image
import cv2, pdb, glob, argparse
import tensorflow as tf
class DeepLabModel(object):
"""Class to load deeplab model and run inference."""
INPUT_TENSOR_NAME = 'ImageTensor:0'
OUTP... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.