python_code
stringlengths
0
258k
import numpy as np import cv2, pdb, glob, argparse MAX_FEATURES = 500 GOOD_MATCH_PERCENT = 0.15 def alignImages(im1, im2,masksDL): # Convert images to grayscale im1Gray = cv2.cvtColor(im1, cv2.COLOR_BGR2GRAY) im2Gray = cv2.cvtColor(im2, cv2.COLOR_BGR2GRAY) akaze = cv2.AKAZE_create() keypoints1, descriptors1 =...
from __future__ import print_function import torch from torch.autograd import Variable import torch.nn as nn import torch.optim as optim from tensorboardX import SummaryWriter import os import time import argparse from data_loader import AdobeDataAffineHR from functions import * from networks import ResnetCondition...
import numpy as np import cv2, pdb, glob, argparse MAX_FEATURES = 500 GOOD_MATCH_PERCENT = 0.15 def alignImages(im1, im2,masksDL): # Convert images to grayscale im1Gray = cv2.cvtColor(im1, cv2.COLOR_BGR2GRAY) im2Gray = cv2.cvtColor(im2, cv2.COLOR_BGR2GRAY) akaze = cv2.AKAZE_create() keypoints1, descriptors1 =...
from __future__ import print_function, division import os import torch import pandas as pd import skimage from skimage import io import numpy as np import matplotlib.pyplot as plt import pdb, random from torch.utils.data import Dataset, DataLoader import random, os, cv2 unknown_code=128 class VideoData(Dataset): def...
from __future__ import print_function import torch from torch.autograd import Variable import torch.nn as nn import torch.optim as optim from tensorboardX import SummaryWriter import os import time import argparse import numpy as np from data_loader import VideoData from functions import * from networks import Resn...
import os import time from argparse import Namespace import torch from torch.autograd import Variable import torch.nn as nn import torch.optim as optim from tensorboardX import SummaryWriter from .data_loader import VideoData from .functions import compose_image_withshift, write_tb_log from .networks import ResnetCond...
import torch import torch.nn as nn import torch.nn.functional as F import torch.nn.init as init import numpy as np class ResnetConditionHR(nn.Module): def __init__(self, input_nc, output_nc, ngf=64, nf_part=64,norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks1=7, n_blocks2=3, padding_type='reflect'): assert(n...
####################################### # Prepares training data. Takes a path to a directory of videos + captured backgrounds, dumps frames, extracts human # segmentations. Also takes a path of background videos. Creates a training CSV file with lines of the following format, # by using all but the last 80 frames of e...
import subprocess
import sys


def pip_install_requirements():
    """Quietly install every package pinned in requirements.txt."""
    # Use the current interpreter so packages land in the active environment.
    cmd = [sys.executable, "-m", "pip", "install", "-q", "-r", "requirements.txt"]
    subprocess.check_call(cmd)


if __name__ == "__main__":
    pip_install_requirements()
from __future__ import print_function import os, glob, time, argparse, pdb, cv2 #import matplotlib.pyplot as plt import numpy as np from skimage.measure import label import torch import torch.nn as nn from torch.autograd import Variable import torch.backends.cudnn as cudnn from functions import * from networks imp...
##Copyright 2017 Adobe Systems Inc. ## ##Licensed under the Apache License, Version 2.0 (the "License"); ##you may not use this file except in compliance with the License. ##You may obtain a copy of the License at ## ## http://www.apache.org/licenses/LICENSE-2.0 ## ##Unless required by applicable law or agreed to in...
from torchbenchmark.util.framework.vision.model_factory import TorchVisionModel from torchbenchmark.tasks import COMPUTER_VISION import torchvision.models as models class Model(TorchVisionModel): task = COMPUTER_VISION.CLASSIFICATION DEFAULT_TRAIN_BSIZE = 32 DEFAULT_EVAL_BSIZE = 32 def __init__(self, ...
from torchbenchmark.tasks import NLP from torchbenchmark.util.framework.huggingface.model_factory import HuggingFaceModel class Model(HuggingFaceModel): task = NLP.LANGUAGE_MODELING DEFAULT_TRAIN_BSIZE = 4 DEFAULT_EVAL_BSIZE = 1 def __init__(self, test, device, jit=False, batch_size=None, extra_args=[...
import subprocess import sys import os from torchbenchmark.util.framework.huggingface.patch_hf import patch_transformers, cache_model def pip_install_requirements(): subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-q', '-r', 'requirements.txt']) if __name__ == '__main__': pip_install_requirem...
from torchbenchmark.util.framework.vision.model_factory import TorchVisionModel from torchbenchmark.tasks import COMPUTER_VISION import torchvision.models as models class Model(TorchVisionModel): task = COMPUTER_VISION.CLASSIFICATION DEFAULT_TRAIN_BSIZE = 8 DEFAULT_EVAL_BSIZE = 8 def __init__(self, te...
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # # Description: an implementation of a deep learning recommendation model (DLRM) # The model input consists of dense and sparse features. The ...
from __future__ import absolute_import, division, print_function, unicode_literals # miscellaneous import builtins import functools # import bisect # import shutil import time import json from typing import Tuple import sys # data generation from . import dlrm_data_pytorch as dp # numpy import numpy as np # pytorch...
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # # Description: generate inputs and targets for the DLRM benchmark # # Utility function(s) to download and pre-process public data sets # - ...
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # # Description: generate inputs and targets for the dlrm benchmark # The inpts and outputs are generated according to the following three opti...
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from __future__ import absolute_import, division, print_function, unicode_literals import os import numpy as np from torch.util...
import subprocess
import sys

# Arguments handed to the interpreter to run `pip install` on the pinned file.
_PIP_ARGS = ("-m", "pip", "install", "-q", "-r", "requirements.txt")


def pip_install_requirements():
    """Install requirements.txt into the active environment, quietly."""
    subprocess.check_call([sys.executable, *_PIP_ARGS])


if __name__ == "__main__":
    pip_install_requirements()
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # # Description: generate inputs and targets for the dlrm benchmark # The inpts and outputs are generated according to the following three opti...
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # # Description: an implementation of a deep learning recommendation model (DLRM) # The model input consists of dense and sparse features. The ...
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # # # This script performs the visualization of the embedding tables created in # DLRM during the training procedure. We use two popular techni...
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # # Description: compile .so from python code from __future__ import absolute_import, division, print_function, unicode_literals from setupto...
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # # Description: run dataset pre-processing in standalone mode # WARNING: These steps are required to work with Cython # 1. Instal Cython # > s...
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # # Mixed-Dimensions Trick # # Description: Applies mixed dimension trick to embeddings to reduce # embedding sizes. # # References: # [1] Anto...
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # # Quotient-Remainder Trick # # Description: Applies quotient remainder-trick to embeddings to reduce # embedding sizes. # # References: # [1]...
import torch # OSS import try: # pyre-ignore[21] # @manual=//ai_codesign/benchmarks/dlrm/torchrec_dlrm/data:dlrm_dataloader from .data.dlrm_dataloader import get_dataloader except ImportError: pass import itertools import os from pyre_extensions import none_throws from torch import distributed as dis...
import argparse from enum import Enum from typing import List class InteractionType(Enum): ORIGINAL = "original" DCN = "dcn" PROJECTION = "projection" def __str__(self): return self.value def parse_args(argv: List[str]) -> argparse.Namespace: parser = argparse.ArgumentParser(description=...
import subprocess import sys import os from pathlib import Path def pip_install_requirements(): subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-q', '-r', 'requirements.txt']) if __name__ == '__main__': pip_install_requirements()
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import argparse import os from typing import List from torch import distributed as dist from torch.utils.data import...
from .dataloader import SuperSloMo from .model_wrapper import Model as ModelWrapper import torch import torch.nn.functional as F import torch.optim as optim import torchvision.transforms as transforms import random from typing import Tuple import os import numpy as np from argparse import Namespace from pathlib import...
from . import slomo_model as model import torch import torchvision import torch.nn as nn import torch.nn.functional as F L1_lossFn = nn.L1Loss() MSE_LossFn = nn.MSELoss() class Model(torch.nn.Module): def __init__(self, device='cpu'): super().__init__() self.flowComp = model.UNet(6, 4).to(device...
#!/usr/bin/env python3 import argparse import os import os.path import ctypes from shutil import rmtree, move from PIL import Image import torch import torchvision.transforms as transforms import slomo_model as model import dataloader import platform from tqdm import tqdm # For parsing commandline arguments parser = a...
#[Super SloMo] ##High Quality Estimation of Multiple Intermediate Frames for Video Interpolation import argparse import torch import torchvision import torchvision.transforms as transforms import torch.optim as optim import torch.nn as nn import torch.nn.functional as F import slomo_model as model from model_wrapper ...
import torch.utils.data as data from PIL import Image import os import os.path import random def _make_dataset(dir): """ Creates a 2D list of all the frames in N clips containing M frames each. 2D List Structure: [[frame00, frame01,...frameM] <-- clip0 [frame00, frame01,...frameM] <-- clip...
import subprocess
import sys


def pip_install_requirements():
    """Run `pip install -q -r requirements.txt` with the active interpreter."""
    command = [sys.executable]
    command.extend(["-m", "pip", "install", "-q", "-r", "requirements.txt"])
    subprocess.check_call(command)


if __name__ == "__main__":
    pip_install_requirements()
""" Converts a Video to SuperSloMo version """ from time import time import click import cv2 import torch from PIL import Image import numpy as np import slomo_model as model from torchvision import transforms import torch.nn.functional as F torch.set_grad_enabled(False) device = torch.device("cuda" if torch.cuda.is_...
import torch import torchvision import torchvision.transforms as transforms import torch.optim as optim import torch.nn as nn import torch.nn.functional as F import numpy as np class down(nn.Module): """ A class for creating neural network blocks containing layers: Average Pooling --> Convlution + Le...
''' Translate input text with trained model. ''' import torch import argparse import dill as pickle from tqdm import tqdm import transformer.Constants as Constants from torchbenchmark.util.torchtext_legacy.data import Dataset from transformer.Models import Transformer from transformer.Translator import Translator d...
''' Handling the data io ''' import contextlib import os import pathlib import argparse import logging import dill as pickle import urllib from tqdm import tqdm import json import sys import codecs import spacy import torch import tarfile import torchtext.data import torchtext.datasets # Handle torchtext_legacy import...
from argparse import Namespace import math import time import os import dill as pickle from tqdm import tqdm import torch import torch.nn.functional as F import torch.optim as optim from torchbenchmark.util.torchtext_legacy.field import Field from torchbenchmark.util.torchtext_legacy.data import Dataset from torchben...
#!/usr/bin/env python # -*- coding: utf-8 -*- # Author: Rico Sennrich """Use operations learned with learn_bpe.py to encode a new text. The text will not be smaller, but use only a fixed vocabulary, with rare words encoded as variable-length sequences of subword units. Reference: Rico Sennrich, Barry Haddow and Alexa...
''' This script handles the training process. ''' import argparse import math import time import functools import dill as pickle from tqdm import tqdm import torch import torch.nn.functional as F import torch.optim as optim from torchbenchmark.util.torchtext_legacy.field import Field from torchbenchmark.util.torchte...
#!/usr/bin/env python # -*- coding: utf-8 -*- # Author: Rico Sennrich """Use byte pair encoding (BPE) to learn a variable-length encoding of the vocabulary in a text. Unlike the original BPE, it does not compress the plain text, but can be used to reduce the vocabulary of a text to a configurable number of symbols, wi...
import os import sys import subprocess from pathlib import Path def pip_install_requirements(): subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-q', '-r', 'requirements.txt']) def spacy_download(language): subprocess.check_call([sys.executable, '-m', 'spacy', 'download', language]) def prepro...
''' Define the Transformer model ''' import torch import torch.nn as nn import numpy as np from .Layers import EncoderLayer, DecoderLayer __author__ = "Yu-Hsiang Huang" def get_pad_mask(seq, pad_idx : int): return (seq != pad_idx).unsqueeze(-2) def get_subsequent_mask(seq): ''' For masking out the subsequ...
# Special-token surface strings shared by the Transformer data pipeline
# (vocabulary building, batching, and decoding all reference these).
PAD_WORD = '<blank>'  # padding token
UNK_WORD = '<unk>'    # unknown / out-of-vocabulary token
BOS_WORD = '<s>'      # beginning-of-sequence marker
EOS_WORD = '</s>'     # end-of-sequence marker
# Package initializer: re-export the transformer submodules so callers can
# reach them as attributes of the package (e.g. `transformer.Models`).
from . import Constants, Modules, Layers, SubLayers, Models, Translator, Optim
''' Define the sublayers in encoder/decoder layer ''' import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from .Modules import ScaledDotProductAttention from typing import Optional __author__ = "Yu-Hsiang Huang" class MultiHeadAttention(nn.Module): ''' Multi-Head Attention module...
import torch import torch.nn as nn import torch.nn.functional as F from typing import Optional __author__ = "Yu-Hsiang Huang" class ScaledDotProductAttention(nn.Module): ''' Scaled Dot-Product Attention ''' def __init__(self, temperature, attn_dropout=0.1): super().__init__() self.temperature...
''' This module will handle the text generation with beam search. ''' import torch import torch.nn as nn import torch.nn.functional as F from .Models import Transformer, get_pad_mask, get_subsequent_mask class Translator(nn.Module): ''' Load a trained model and translate in beam search fashion. ''' def __in...
''' Define the Layers ''' import torch.nn as nn import torch from .SubLayers import MultiHeadAttention, PositionwiseFeedForward from typing import Optional __author__ = "Yu-Hsiang Huang" class EncoderLayer(nn.Module): ''' Compose with two layers ''' def __init__(self, d_model, d_inner, n_head, d_k, d_v, dr...
'''A wrapper class for scheduled optimizer ''' import numpy as np class ScheduledOptim(): '''A simple wrapper class for learning rate scheduling''' def __init__(self, optimizer, init_lr, d_model, n_warmup_steps): self._optimizer = optimizer self.init_lr = init_lr self.d_model = d_model...
# This file was adapted from # https://github.com/facebookresearch/higher/blob/master/examples/maml-omniglot.py # It comes with the following license. # # Copyright (c) Facebook, Inc. and its affiliates. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance...
import subprocess
import sys


def pip_install_requirements():
    """Install the benchmark's requirements.txt without verbose pip output."""
    pip_invocation = [sys.executable, "-m", "pip"]
    subprocess.check_call(pip_invocation + ["install", "-q", "-r", "requirements.txt"])


if __name__ == "__main__":
    pip_install_requirements()
from torchbenchmark.tasks import NLP from torchbenchmark.util.framework.huggingface.model_factory import HuggingFaceModel class Model(HuggingFaceModel): task = NLP.LANGUAGE_MODELING DEFAULT_TRAIN_BSIZE = 8 DEFAULT_EVAL_BSIZE = 1 def __init__(self, test, device, jit=False, batch_size=None, extra_args=[...
import subprocess import sys import os from torchbenchmark.util.framework.huggingface.patch_hf import patch_transformers, cache_model def pip_install_requirements(): subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-q', '-r', 'requirements.txt']) if __name__ == '__main__': pip_install_requirem...
import os from torchbenchmark.tasks import COMPUTER_VISION from torchbenchmark.util.framework.detectron2.model_factory import Detectron2Model MODEL_NAME = os.path.basename(os.path.dirname(os.path.abspath(__file__))) MODEL_DIR = os.path.abspath(os.path.dirname(__file__)) class Model(Detectron2Model): task = COMPU...
import os

from torchbenchmark.util.framework.detectron2 import install_detectron2

# Derive the model's identity from the directory this file lives in.
_THIS_FILE = os.path.abspath(__file__)
MODEL_DIR = os.path.dirname(_THIS_FILE)
MODEL_NAME = os.path.basename(MODEL_DIR)

if __name__ == '__main__':
    install_detectron2(MODEL_NAME, MODEL_DIR)
"Doctr recognition model" from doctr.models import ocr_predictor import numpy as np import torch # TorchBench imports from torchbenchmark.util.model import BenchmarkModel from torchbenchmark.tasks import COMPUTER_VISION from typing import Tuple class Model(BenchmarkModel): task = COMPUTER_VISION.DETECTION DE...
import os import warnings import subprocess import sys def pip_install_requirements(): try: subprocess.check_call(["conda", "install", "-y", "expecttest", "libglib", "pango", "-c", "conda-forge"]) except: warnings.warn("The doctr_reco_predictor model requires conda binary libaries to be install...
import os from torchbenchmark.tasks import COMPUTER_VISION from torchbenchmark.util.framework.detectron2.model_factory import Detectron2Model MODEL_NAME = os.path.basename(os.path.dirname(os.path.abspath(__file__))) MODEL_DIR = os.path.abspath(os.path.dirname(__file__)) class Model(Detectron2Model): task = COMPUT...
import os

from torchbenchmark.util.framework.detectron2 import install_detectron2

# The model's name and directory are inferred from this file's on-disk location.
MODEL_DIR = os.path.dirname(os.path.abspath(__file__))
MODEL_NAME = os.path.basename(MODEL_DIR)

if __name__ == '__main__':
    install_detectron2(MODEL_NAME, MODEL_DIR)
# Copyright (c) Facebook, Inc. and its affiliates. # All rights reserved. # # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. """ Dora the Explorer, special thank to @pierrestock. """ import argparse import json import logging import shlex import sub...
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # All rights reserved. # # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. """ Run training locally on all visible GPUs. Start only one task per node as this script will spawn ...
"""Compare two torch-saved objects (paths given on the command line) for
approximate equality within a 1% relative / 0.01 absolute tolerance."""
import sys

import torch

a = torch.load(sys.argv[1])
b = torch.load(sys.argv[2])
# assert_allclose is deprecated (torch >= 1.12); assert_close is its
# supported replacement and takes the same rtol/atol tolerances.
torch.testing.assert_close(a, b, rtol=0.01, atol=0.01)
# Copyright (c) Facebook, Inc. and its affiliates. # All rights reserved. # # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. """ Quantize a pre-trained model. Just pass the path to the model to this script and it will save a gzipped compressed versi...
import json import torch import random import numpy as np from fractions import Fraction from .demucs.model import Demucs from .demucs.parser import get_name, get_parser from .demucs.augment import FlipChannels, FlipSign, Remix, Shift from .demucs.utils import capture_init, center_trim from ...util.model import Benchm...
# Copyright (c) Facebook, Inc. and its affiliates. # All rights reserved. # # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. import argparse import gzip import json import sys from collections import defaultdict from pathlib import Path import num...
# Copyright (c) Facebook, Inc. and its affiliates. # All rights reserved. # # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. import argparse import json from collections import defaultdict from pathlib import Path import numpy as np import treetab...
import subprocess
import sys


def pip_install_requirements():
    """Quietly install the pinned requirements with pip."""
    args = [sys.executable, '-m', 'pip', 'install', '-q', '-r', 'requirements.txt']
    subprocess.check_call(args)


def spacy_download(language):
    """Placeholder: this benchmark needs no spaCy language data."""
    pass


def preprocess():
    """Placeholder: this benchmark needs no preprocessing step."""
    pass


if __name__ == '__main__':
    pip_install_requirements()
    spacy_download('')
    preprocess()
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # All rights reserved. # # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. """ Run training from Slurm on all visible GPUs. Start only one task per node as this script will spa...
# Copyright (c) Facebook, Inc. and its affiliates. # All rights reserved. # # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. import argparse import os from collections import defaultdict, namedtuple from pathlib import Path import musdb import num...
# Copyright (c) Facebook, Inc. and its affiliates. # All rights reserved. # # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. import json from concurrent import futures import musdb from .audio import AudioFile def get_musdb_tracks(root, *args, ...
# Copyright (c) Facebook, Inc. and its affiliates. # All rights reserved. # # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. # # Created on 2018/12 # Author: Kaituo XU # Modified on 2019/11 by Alexandre Defossez, added support for multiple output ch...
# Copyright (c) Facebook, Inc. and its affiliates. # All rights reserved. # # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree.
# Copyright (c) Facebook, Inc. and its affiliates. # All rights reserved. # # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. import argparse import hashlib import sys from pathlib import Path import requests import torch as th import tqdm from sci...
# Copyright (c) Facebook, Inc. and its affiliates. # All rights reserved. # # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. import gzip import sys from concurrent import futures import musdb import museval import torch as th import tqdm from scip...
# Copyright (c) Facebook, Inc. and its affiliates. # All rights reserved. # # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. import torch import torch as th from torch import nn class Shift(nn.Module): """ Randomly shift audio in time by ...
# Copyright (c) Facebook, Inc. and its affiliates. # All rights reserved. # # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. import math import torch import torch as th from torch import Tensor, nn from .utils import capture_init, center_trim fro...
# Copyright (c) Facebook, Inc. and its affiliates. # All rights reserved. # # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. import argparse import os from pathlib import Path def get_parser(): parser = argparse.ArgumentParser("demucs", descr...
# Copyright (c) Facebook, Inc. and its affiliates. # All rights reserved. # # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. import errno import functools import gzip import os import random import socket import tempfile import warnings from contex...
# Copyright (c) Facebook, Inc. and its affiliates. # All rights reserved. # # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. import sys import tqdm from torch.utils.data import DataLoader from torch.utils.data.distributed import DistributedSampler...
# Copyright (c) Facebook, Inc. and its affiliates. # All rights reserved. # # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. import json import subprocess as sp from pathlib import Path import numpy as np import torch from .utils import temp_filen...
# Copyright (c) Facebook, Inc. and its affiliates. # All rights reserved. # # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. import json import os import sys import time from dataclasses import dataclass, field from fractions import Fraction impor...
from torchbenchmark.tasks import NLP from torchbenchmark.util.framework.huggingface.model_factory import HuggingFaceModel class Model(HuggingFaceModel): task = NLP.LANGUAGE_MODELING DEFAULT_TRAIN_BSIZE = 8 DEFAULT_EVAL_BSIZE = 1 def __init__(self, test, device, jit=False, batch_size=None, extra_args=[...
import subprocess import sys import os from torchbenchmark.util.framework.huggingface.patch_hf import patch_transformers, cache_model def pip_install_requirements(): subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-q', '-r', 'requirements.txt']) if __name__ == '__main__': pip_install_requirem...
"""Compare two torch-saved objects (paths given on the command line) for
approximate equality within a 1% relative / 0.01 absolute tolerance."""
import sys

import torch

a = torch.load(sys.argv[1])
b = torch.load(sys.argv[2])
# assert_allclose is deprecated (torch >= 1.12); assert_close is its
# supported replacement and takes the same rtol/atol tolerances.
torch.testing.assert_close(a, b, rtol=0.01, atol=0.01)
import argparse import torch.distributed as dist import torch.optim as optim import torch.optim.lr_scheduler as lr_scheduler from torch.utils.tensorboard import SummaryWriter from .test import test # import test.py to get mAP after each epoch from .yolo_models import * from .yolo_utils.datasets import * from .yolo_u...
#!/usr/bin/env python # Make all randomness deterministic import random import argparse import torch import os import numpy as np from contextlib import nullcontext torch.backends.cudnn.deterministic = False torch.backends.cudnn.benchmark = True from shlex import split from .yolo_train import prepare_training_loop f...
import argparse import json from torch.utils.data import DataLoader from .yolo_models import * from .yolo_utils.datasets import * from .yolo_utils.utils import * import os.path def test(cfg, data, weights=None, batch_size=16, imgsz=416, conf_thres=0.001, iou_thr...
import subprocess import sys import os from pathlib import Path def setup_data_dir(): current_dir = Path(os.path.dirname(os.path.realpath(__file__))) coco128_data_dir = os.path.join(current_dir.parent.parent, "data", ".data", "coco128") assert os.path.exists(coco128_data_dir), "Couldn't find coco128 data d...
from .yolo_utils.google_utils import * from .yolo_utils.layers import * from .yolo_utils.parse_config import * ONNX_EXPORT = False def create_modules(module_defs, img_size, cfg): # Constructs module list of layer blocks from module configuration in module_defs img_size = [img_size] * 2 if isinstance(img_siz...
import argparse from models import * # set ONNX_EXPORT in models.py from utils.datasets import * from utils.utils import * def detect(save_img=False): imgsz = (320, 192) if ONNX_EXPORT else opt.img_size # (320, 192) or (416, 256) or (608, 352) for (height, width) out, source, weights, half, view_img, save_...
import glob import math import os import random import shutil import time from pathlib import Path from threading import Thread import cv2 import numpy as np import torch from PIL import Image, ExifTags from torch.utils.data import Dataset from .utils import xyxy2xywh, xywh2xyxy help_url = 'https://github.com/ultral...