| id (int64, 0–190k) | prompt (string, 21–13.4M chars) | docstring (string, 1–12k chars, nullable ⌀) |
|---|---|---|
3,366 | from __future__ import print_function
import numpy as np
import torch
from PIL import Image
import os
import importlib
import argparse
from argparse import Namespace
import torchvision
def copyconf(default_opt, **kwargs):
conf = Namespace(**vars(default_opt))
for key in kwargs:
setattr(conf, key, kwarg... | null |
3,367 | from __future__ import print_function
import numpy as np
import torch
from PIL import Image
import os
import importlib
import argparse
from argparse import Namespace
import torchvision
def genvalconf(train_opt, **kwargs):
conf = Namespace(**vars(train_opt))
attr_dict = train_opt.__dict__
for key, value in ... | null |
3,368 | from __future__ import print_function
import numpy as np
import torch
from PIL import Image
import os
import importlib
import argparse
from argparse import Namespace
import torchvision
def find_class_in_module(target_cls_name, module):
target_cls_name = target_cls_name.replace('_', '').lower()
clslib = importl... | null |
3,369 | from __future__ import print_function
import numpy as np
import torch
from PIL import Image
import os
import importlib
import argparse
from argparse import Namespace
import torchvision
The provided code snippet includes necessary dependencies for implementing the `diagnose_network` function. Write a Python function `d... | Calculate and print the mean of average absolute(gradients) Parameters: net (torch network) -- Torch network name (str) -- the name of the network |
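The prompt above is truncated before the function body. A minimal sketch consistent with the docstring (the signature `diagnose_network(net, name='network')` is inferred from the parameter list; gradients are assumed to be populated by a prior backward pass):

```python
import torch

def diagnose_network(net, name='network'):
    """Calculate and print the mean of average absolute gradients."""
    mean = 0.0
    count = 0
    for param in net.parameters():
        if param.grad is not None:
            # mean absolute gradient of this parameter tensor
            mean += torch.mean(torch.abs(param.grad.data))
            count += 1
    if count > 0:
        mean = mean / count
    print(name)
    print(mean)
```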
3,370 | from __future__ import print_function
import numpy as np
import torch
from PIL import Image
import os
import importlib
import argparse
from argparse import Namespace
import torchvision
The provided code snippet includes necessary dependencies for implementing the `print_numpy` function. Write a Python function `def pr... | Print the mean, min, max, median, std, and size of a numpy array Parameters: val (bool) -- if print the values of the numpy array shp (bool) -- if print the shape of the numpy array |
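Likewise truncated; a minimal sketch matching the docstring (flattening before computing statistics so they cover the whole array):

```python
import numpy as np

def print_numpy(x, val=True, shp=False):
    """Print the mean, min, max, median, std, and size of a numpy array."""
    x = x.astype(np.float64)
    if shp:
        print('shape,', x.shape)
    if val:
        x = x.flatten()  # statistics over all elements
        print('mean = %3.3f, min = %3.3f, max = %3.3f, median = %3.3f, std = %3.3f'
              % (np.mean(x), np.min(x), np.max(x), np.median(x), np.std(x)))
```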
3,371 | from __future__ import print_function
import numpy as np
import torch
from PIL import Image
import os
import importlib
import argparse
from argparse import Namespace
import torchvision
def mkdir(path):
"""create a single empty directory if it didn't exist
Parameters:
path (str) -- a single directory pat... | create empty directories if they don't exist Parameters: paths (str list) -- a list of directory paths |
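The visible code is the single-directory `mkdir`; the docstring describes its list-accepting companion. A minimal sketch, assuming the conventional `mkdirs` name:

```python
import os

def mkdirs(paths):
    """Create empty directories if they don't exist (accepts a str or a list of str)."""
    if isinstance(paths, list):
        for path in paths:
            os.makedirs(path, exist_ok=True)
    else:
        os.makedirs(paths, exist_ok=True)
```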
3,372 | from __future__ import print_function
import numpy as np
import torch
from PIL import Image
import os
import importlib
import argparse
from argparse import Namespace
import torchvision
def correct_resize_label(t, size):
device = t.device
t = t.detach().cpu()
resized = []
for i in range(t.size(0)):
... | null |
3,373 | from __future__ import print_function
import numpy as np
import torch
from PIL import Image
import os
import importlib
import argparse
from argparse import Namespace
import torchvision
def tensor2im(input_image, imtype=np.uint8):
""""Converts a Tensor array into a numpy image array.
Parameters:
input_im... | null |
3,374 | from __future__ import print_function
import numpy as np
import torch
from PIL import Image
import os
import importlib
import argparse
from argparse import Namespace
import torchvision
The provided code snippet includes necessary dependencies for implementing the `draw_landmarks` function. Write a Python function `def... | Return: img -- numpy.array, (B, H, W, 3) img with landmark, RGB order, range (0, 255) Parameters: img -- numpy.array, (B, H, W, 3), RGB order, range (0, 255) landmark -- numpy.array, (B, 68, 2), y direction is opposite to v direction color -- str, 'r' or 'b' (red or blue) |
3,375 | import numpy as np
from scipy.io import loadmat
from PIL import Image
import cv2
import os
from skimage import transform as trans
import torch
import warnings
def POS(xp, x):
npts = xp.shape[1]
A = np.zeros([2*npts, 8])
A[0:2*npts-1:2, 0:3] = x.transpose()
A[0:2*npts-1:2, 3] = 1
A[1:2*npts:2, 4:7]... | Return: transparams --numpy.array (raw_W, raw_H, scale, tx, ty) img_new --PIL.Image (target_size, target_size, 3) lm_new --numpy.array (68, 2), y direction is opposite to v direction mask_new --PIL.Image (target_size, target_size) Parameters: img --PIL.Image (raw_H, raw_W, 3) lm --numpy.array (68, 2), y direction is op... |
3,376 | import math
import numpy as np
import os
import cv2
def skinmask(imbgr):
im = _bgr2ycbcr(imbgr)
data = im.reshape((-1,3))
lh_skin = gmm_skin.likelihood(data)
lh_nonskin = gmm_nonskin.likelihood(data)
tmp1 = prior_skin * lh_skin
tmp2 = prior_nonskin * lh_nonskin
post_skin = tmp1 / (tmp1+tmp2)... | null |
3,377 | import os
import cv2
import numpy as np
from scipy.io import loadmat
import tensorflow as tf
from util.preprocess import align_for_lm
from shutil import move
def load_lm_graph(graph_filename):
with tf.gfile.GFile(graph_filename, 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read... | null |
3,378 | import os
import cv2
import numpy as np
from scipy.io import loadmat
import tensorflow as tf
from util.preprocess import align_for_lm
from shutil import move
mean_face = np.loadtxt('util/test_mean_face.txt')
mean_face = mean_face.reshape([68, 2])
def save_label(labels, save_path):
np.savetxt(save_path, labels)
def ... | null |
3,379 | import numpy as np
import os
import sys
import ntpath
import time
from . import util, html
from subprocess import Popen, PIPE
from torch.utils.tensorboard import SummaryWriter
The provided code snippet includes necessary dependencies for implementing the `save_images` function. Write a Python function `def save_images... | Save images to the disk. Parameters: webpage (the HTML class) -- the HTML webpage class that stores these images (see html.py for more details) visuals (OrderedDict) -- an ordered dictionary that stores (name, images (either tensor or numpy) ) pairs image_path (str) -- the string is used to create image paths aspect_ra... |
3,380 | import numpy as np
from PIL import Image
from scipy.io import loadmat, savemat
from array import array
import os.path as osp
def LoadExpBasis(bfm_folder='BFM'):
def transferBFM09(bfm_folder='BFM'):
print('Transfer BFM09 to BFM_model_front......')
original_BFM = loadmat(osp.join(bfm_folder, '01_MorphableModel.m... | null |
3,381 | import numpy as np
from PIL import Image
from scipy.io import loadmat, savemat
from array import array
import os.path as osp
def load_lm3d(bfm_folder):
Lm3D = loadmat(osp.join(bfm_folder, 'similarity_Lm3D_all.mat'))
Lm3D = Lm3D['lm']
# calculate 5 facial landmarks using 68 landmarks
lm_idx = np.array... | null |
3,382 | import os
def write_list(lms_list, imgs_list, msks_list, mode='train',save_folder='datalist', save_name=''):
save_path = os.path.join(save_folder, mode)
if not os.path.isdir(save_path):
os.makedirs(save_path)
with open(os.path.join(save_path, save_name + 'landmarks.txt'), 'w') as fd:
fd.wri... | null |
3,383 | import os
def check_list(rlms_list, rimgs_list, rmsks_list):
lms_list, imgs_list, msks_list = [], [], []
for i in range(len(rlms_list)):
flag = 'false'
lm_path = rlms_list[i]
im_path = rimgs_list[i]
msk_path = rmsks_list[i]
if os.path.isfile(lm_path) and os.path.isfile(i... | null |
3,384 | import os
import cv2
import time
import glob
import argparse
import face_alignment
import numpy as np
from PIL import Image
from tqdm import tqdm
from itertools import cycle
from torch.multiprocessing import Pool, Process, set_start_method
class KeypointExtractor():
def __init__(self, device):
self.detector... | null |
3,385 | import os
import numpy as np
from PIL import Image
from skimage import img_as_float32, transform
import torch
import scipy.io as scio
from glob import glob
def transform_semantic_1(semantic, semantic_radius):
semantic_list = [semantic for i in range(0, semantic_radius * 2 + 1)]
coeff_3dmm = np.concatenate(seman... | null |
3,386 | import os
import numpy as np
from PIL import Image
from skimage import img_as_float32, transform
import torch
import scipy.io as scio
from glob import glob
def gen_camera_pose(camera_degree_list, frame_num, batch_size):
new_degree_list = []
if len(camera_degree_list) == 1:
for _ in range(frame_num):
... | null |
3,387 | import torch
import torch.nn.functional as F
from torch import nn
from src.audio2pose_models.res_unet import ResUnet
def class2onehot(idx, class_num):
assert torch.max(idx).item() < class_num
onehot = torch.zeros(idx.size(0), class_num).to(idx.device)
onehot.scatter_(1, idx, 1)
return onehot | null |
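This row's snippet is complete. A quick usage note: `scatter_` indexes along dim 1, so `idx` must have shape `(batch, 1)`:

```python
import torch

idx = torch.tensor([[2], [0], [3]])      # class indices, shape (batch, 1)
onehot = class2onehot(idx, class_num=5)  # shape (3, 5)
# tensor([[0., 0., 1., 0., 0.],
#         [1., 0., 0., 0., 0.],
#         [0., 0., 0., 1., 0.]])
```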
3,388 | from torch import nn
import torch.nn.functional as F
import torch
from src.facerender.sync_batchnorm import SynchronizedBatchNorm2d as BatchNorm2d
from src.facerender.sync_batchnorm import SynchronizedBatchNorm3d as BatchNorm3d
import torch.nn.utils.spectral_norm as spectral_norm
def make_coordinate_grid(spatial_size, ... | Transform a keypoint into gaussian like representation |
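The docstring describes the usual `kp2gaussian` helper from first-order-motion-style renderers. A self-contained 2D sketch of the idea (the repo version builds the grid via `make_coordinate_grid` and also handles 3D keypoints; the `(B, K, 2)` layout and `[-1, 1]` coordinates are assumptions):

```python
import torch

def kp2gaussian(kp, spatial_size, kp_variance):
    """Turn keypoints (B, K, 2) in [-1, 1] coords into (B, K, H, W) gaussian heatmaps."""
    h, w = spatial_size
    y = torch.linspace(-1, 1, h).view(h, 1).expand(h, w)
    x = torch.linspace(-1, 1, w).view(1, w).expand(h, w)
    grid = torch.stack([x, y], dim=-1)                 # (H, W, 2)
    mean = kp.view(kp.shape[0], kp.shape[1], 1, 1, 2)  # (B, K, 1, 1, 2)
    dist = ((grid - mean) ** 2).sum(-1)                # squared distance to each keypoint
    return torch.exp(-0.5 * dist / kp_variance)        # isotropic gaussian bump per keypoint
```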
3,389 | from torch import nn
import torch.nn.functional as F
import torch
from src.facerender.sync_batchnorm import SynchronizedBatchNorm2d as BatchNorm2d
from src.facerender.sync_batchnorm import SynchronizedBatchNorm3d as BatchNorm3d
import torch.nn.utils.spectral_norm as spectral_norm
The provided code snippet includes nec... | Create a meshgrid [-1,1] x [-1,1] of given spatial_size. |
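A minimal sketch of `make_coordinate_grid` matching the docstring (`spatial_size` assumed to be `(H, W)`; the repo version takes a tensor `type` argument rather than a dtype):

```python
import torch

def make_coordinate_grid(spatial_size, dtype=torch.float32):
    """Create a meshgrid covering [-1, 1] x [-1, 1] of the given (H, W) size."""
    h, w = spatial_size
    x = torch.linspace(-1, 1, w, dtype=dtype)
    y = torch.linspace(-1, 1, h, dtype=dtype)
    xx = x.view(1, -1).repeat(h, 1)      # (H, W), column coordinate
    yy = y.view(-1, 1).repeat(1, w)      # (H, W), row coordinate
    return torch.stack([xx, yy], dim=2)  # (H, W, 2) in (x, y) order
```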
3,390 | from scipy.spatial import ConvexHull
import torch
import torch.nn.functional as F
import numpy as np
from tqdm import tqdm
def normalize_kp(kp_source, kp_driving, kp_driving_initial, adapt_movement_scale=False,
use_relative_movement=False, use_relative_jacobian=False):
if adapt_movement_scale:
... | null |
3,391 | from scipy.spatial import ConvexHull
import torch
import torch.nn.functional as F
import numpy as np
from tqdm import tqdm
def keypoint_transformation(kp_canonical, he, wo_exp=False):
kp = kp_canonical['value'] # (bs, k, 3)
yaw, pitch, roll = he['yaw'], he['pitch'], he['roll']
yaw = headpose_pred_to_degree... | null |
3,392 | import collections
import torch
import torch.nn.functional as F
from torch.nn.modules.batchnorm import _BatchNorm
from torch.nn.parallel._functions import ReduceAddCoalesced, Broadcast
from .comm import SyncMaster
The provided code snippet includes necessary dependencies for implementing the `_sum_ft` function. Write ... | sum over the first and last dimension |
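This helper is a one-liner in the Synchronized-BatchNorm implementation the row imports from; a sketch:

```python
def _sum_ft(tensor):
    """Sum over the first and last dimension (keeps the channel dim in the middle)."""
    return tensor.sum(dim=0).sum(dim=-1)
```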
3,393 | import collections
import torch
import torch.nn.functional as F
from torch.nn.modules.batchnorm import _BatchNorm
from torch.nn.parallel._functions import ReduceAddCoalesced, Broadcast
from .comm import SyncMaster
The provided code snippet includes necessary dependencies for implementing the `_unsqueeze_ft` function. ... | add new dimensions at the front and the tail |
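The matching inverse-shape helper, likewise a one-liner:

```python
def _unsqueeze_ft(tensor):
    """Add new dimensions at the front and the tail, e.g. (C,) -> (1, C, 1)."""
    return tensor.unsqueeze(0).unsqueeze(-1)
```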
3,394 | import functools
from torch.nn.parallel.data_parallel import DataParallel
def execute_replication_callbacks(modules):
"""
Execute a replication callback `__data_parallel_replicate__` on each module created by the original replication.
The callback will be invoked with arguments `__data_parallel_replicate__(ctx... | Monkey-patch an existing `DataParallel` object. Add the replication callback. Useful when you have customized `DataParallel` implementation. Examples: > sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False) > sync_bn = DataParallel(sync_bn, device_ids=[0, 1]) > patch_replication_callback(sync_bn) # this is equi... |
3,395 | import os
from tqdm import tqdm
import torch
import numpy as np
import random
import scipy.io as scio
import src.utils.audio as audio
def generate_blink_seq(num_frames):
ratio = np.zeros((num_frames,1))
frame_id = 0
while frame_id in range(num_frames):
start = 80
if frame_id+start+9<=num_fr... | null |
3,396 | import os
from tqdm import tqdm
import torch
import numpy as np
import random
import scipy.io as scio
import src.utils.audio as audio
def crop_pad_audio(wav, audio_length):
def parse_audio_length(audio_length, sr, fps):
def generate_blink_seq_randomly(num_frames):
def get_data(first_coeff_path, audio_path, device):
... | null |
3,397 | import os, sys
import cv2
import glob
import shutil
import numpy as np
from tqdm import tqdm
from imageio import imread, imsave
from src.dain_model.base_predictor import BasePredictor
def video2frames(video_path, outpath, **kargs):
def _dict2str(kargs):
cmd_str = ''
for k, v in kargs.items():
... | null |
3,398 | import os, sys
import cv2
import glob
import shutil
import numpy as np
from tqdm import tqdm
from imageio import imread, imsave
from src.dain_model.base_predictor import BasePredictor
def frames2video(frame_path, video_path, r, w, h):
out = cv2.VideoWriter(video_path, cv2.VideoWriter_fourcc(*'DIVX'), int(r), (w, h... | null |
3,399 | import os
import cv2
import time
import glob
import argparse
import scipy
import numpy as np
from PIL import Image
from tqdm import tqdm
from itertools import cycle
from torch.multiprocessing import Pool, Process, set_start_method
import numpy as np
from PIL import Image
import dlib
def create_video(video_name, frames... | null |
3,400 | import os
import cv2
import time
import glob
import argparse
import scipy
import numpy as np
from PIL import Image
from tqdm import tqdm
from itertools import cycle
from torch.multiprocessing import Pool, Process, set_start_method
import numpy as np
from PIL import Image
import dlib
class Croper:
def __init__(self,... | null |
3,401 | import os
import cv2
import time
import glob
import argparse
import scipy
import numpy as np
from PIL import Image
from tqdm import tqdm
from itertools import cycle
from torch.multiprocessing import Pool, Process, set_start_method
import numpy as np
from PIL import Image
import dlib
def get_data_path(video_dir):
e... | null |
3,402 | import os
import cv2
import time
import glob
import argparse
import scipy
import numpy as np
from PIL import Image
from tqdm import tqdm
from itertools import cycle
from torch.multiprocessing import Pool, Process, set_start_method
import numpy as np
from PIL import Image
import dlib
def get_wra_data_path(video_dir):
... | null |
3,403 | import shutil
import uuid
import os
import cv2
def save_video_with_watermark(video, audio, save_path, watermark=False):
temp_file = str(uuid.uuid4())+'.mp4'
cmd = r'ffmpeg -y -i "%s" -i "%s" -vcodec copy "%s"' % (video, audio, temp_file)
os.system(cmd)
if watermark is False:
shutil.move(temp_f... | null |
3,404 | import cv2, os
import numpy as np
from tqdm import tqdm
import uuid
from src.inference_utils import Laplacian_Pyramid_Blending_with_mask
def Laplacian_Pyramid_Blending_with_mask(A, B, m, num_levels=6):
def paste_pic(video_path, pic_path, crop_info, new_audio_path, full_video_path, restorer, enhancer, enhancer_region)... | null |
3,405 | import numpy as np
import cv2, os, torch
from tqdm import tqdm
from PIL import Image
from src.face3d.util.preprocess import align_img
from src.face3d.util.load_mats import load_lm3d
from src.face3d.models import networks
from src.face3d.extract_kp_videos import KeypointExtractor
from scipy.io import savemat
from src.ut... | Return: coeffs_dict -- a dict of torch.tensors Parameters: coeffs -- torch.tensor, size (B, 256) |
3,406 | import librosa
import librosa.filters
import numpy as np
from scipy import signal
from scipy.io import wavfile
from src.utils.hparams import hparams as hp
def save_wav(wav, path, sr):
wav *= 32767 / max(0.01, np.max(np.abs(wav)))
#proposed by @dsmiller
wavfile.write(path, sr, wav.astype(np.int16)) | null |
3,407 | import librosa
import librosa.filters
import numpy as np
from scipy import signal
from scipy.io import wavfile
from src.utils.hparams import hparams as hp
def save_wavenet_wav(wav, path, sr):
librosa.output.write_wav(path, wav, sr=sr) | null |
3,408 | import librosa
import librosa.filters
import numpy as np
from scipy import signal
from scipy.io import wavfile
from src.utils.hparams import hparams as hp
def inv_preemphasis(wav, k, inv_preemphasize=True):
if inv_preemphasize:
return signal.lfilter([1], [1, -k], wav)
return wav | null |
3,409 | import librosa
import librosa.filters
import numpy as np
from scipy import signal
from scipy.io import wavfile
from src.utils.hparams import hparams as hp
def preemphasis(wav, k, preemphasize=True):
if preemphasize:
return signal.lfilter([1, -k], [1], wav)
return wav
def _stft(y):
if hp.use_lws:
... | null |
3,410 | import librosa
import librosa.filters
import numpy as np
from scipy import signal
from scipy.io import wavfile
from src.utils.hparams import hparams as hp
def num_frames(length, fsize, fshift):
"""Compute number of time frames of spectrogram
"""
pad = (fsize - fshift)
if length % fshift == 0:
M ... | Compute left and right padding |
3,411 | import librosa
import librosa.filters
import numpy as np
from scipy import signal
from scipy.io import wavfile
from src.utils.hparams import hparams as hp
def librosa_pad_lr(x, fsize, fshift):
return 0, (x.shape[0] // fshift + 1) * fshift - x.shape[0] | null |
3,412 | import librosa
import librosa.filters
import numpy as np
from scipy import signal
from scipy.io import wavfile
from src.utils.hparams import hparams as hp
def _db_to_amp(x):
return np.power(10.0, (x) * 0.05) | null |
3,413 | import librosa
import librosa.filters
import numpy as np
from scipy import signal
from scipy.io import wavfile
from src.utils.hparams import hparams as hp
def _denormalize(D):
if hp.allow_clipping_in_normalization:
if hp.symmetric_mels:
return (((np.clip(D, -hp.max_abs_value,
... | null |
3,414 | import os
import torch
from gfpgan import GFPGANer
from tqdm import tqdm
from src.utils.videoio import load_video_to_cv2
def load_video_to_cv2(input_path):
video_stream = cv2.VideoCapture(input_path)
fps = video_stream.get(cv2.CAP_PROP_FPS)
full_frames = []
while 1:
still_reading, frame = vid... | null |
3,415 | from glob import glob
import os
hparams = HParams(
num_mels=80, # Number of mel-spectrogram channels and local conditioning dimensionality
# network
rescale=True, # Whether to rescale audio prior to preprocessing
rescaling_max=0.9, # Rescaling value
# Use LWS (https://github.com/Jonathan-LeRoux/lws) for STFT a... | null |
3,416 | import torch.nn as nn
from basicsr.utils.registry import ARCH_REGISTRY
The provided code snippet includes necessary dependencies for implementing the `conv3x3` function. Write a Python function `def conv3x3(inplanes, outplanes, stride=1)` to solve the following problem:
A simple wrapper for 3x3 convolution with paddin... | A simple wrapper for 3x3 convolution with padding. Args: inplanes (int): Channel number of inputs. outplanes (int): Channel number of outputs. stride (int): Stride in convolution. Default: 1. |
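The wrapper itself is a one-liner; a sketch matching the documented signature (bias behavior is an assumption, as the body is truncated):

```python
import torch.nn as nn

def conv3x3(inplanes, outplanes, stride=1):
    """A simple wrapper for 3x3 convolution with padding."""
    return nn.Conv2d(inplanes, outplanes, kernel_size=3, stride=stride, padding=1)
```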
3,417 | from face_parse.blocks import *
import torch
from torch import nn
import numpy as np
class ParseNet(nn.Module):
def __init__(self,
in_size=128,
out_size=128,
min_feat_size=32,
base_ch=64,
parsing_ch=19,
res_depth=10,
... | null |
3,418 | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.model_zoo as modelzoo
The provided code snippet includes necessary dependencies for implementing the `conv3x3` function. Write a Python function `def conv3x3(in_planes, out_planes, stride=1)` to solve the following problem:
3x3 convo... | 3x3 convolution with padding |
3,419 | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.model_zoo as modelzoo
class BasicBlock(nn.Module):
def __init__(self, in_chan, out_chan, stride=1):
def forward(self, x):
def create_layer_basic(in_chan, out_chan, bnum, stride=1):
layers = [BasicBlock(in_chan, out_chan... | null |
3,420 | import cv2
import numpy as np
import os.path as path
import dlib
import os
def boundary_points(points, width_percent=0.1, height_percent=0.1):
""" Produce additional boundary points
:param points: *m* x 2 array of x,y points
:param width_percent: [-1, 1] percentage of width to taper inwards. Negative for opposite... | Locates 77 face points using stasm (http://www.milbo.users.sonic.net/stasm) :param img: an image array :param add_boundary_points: bool to add 2 additional points :returns: Array of x,y face points. Empty array if no face found |
3,421 | import cv2
import numpy as np
import scipy.sparse
The provided code snippet includes necessary dependencies for implementing the `apply_mask` function. Write a Python function `def apply_mask(img, mask)` to solve the following problem:
Apply mask to supplied image :param img: max 3 channel image :param mask: [0-255] v... | Apply mask to supplied image :param img: max 3 channel image :param mask: [0-255] values in mask :returns: new image with mask applied |
3,422 | import cv2
import numpy as np
import scipy.sparse
def alpha_feathering(src_img, dest_img, img_mask, blur_radius=15):
mask = cv2.blur(img_mask, (blur_radius, blur_radius))
mask = mask / 255.0
result_img = np.empty(src_img.shape, np.uint8)
for i in range(3):
result_img[..., i] = src_img[..., i] * mask + des... | null |
3,423 | import cv2
import numpy as np
def check_write_video(func):
def inner(self, *args, **kwargs):
if self.video:
return func(self, *args, **kwargs)
else:
pass
return inner | null |
3,424 | import numpy as np
import scipy.spatial as spatial
def warp_image(src_img, src_points, dest_points, dest_shape, dtype=np.uint8):
# Resultant image will not have an alpha channel
num_chans = 3
src_img = src_img[:, :, :3]
rows, cols = dest_shape[:2]
result_img = np.zeros((rows, cols, num_chans), dtype)
delaun... | null |
3,425 | from docopt import docopt
import os
import numpy as np
import cv2
from facemorpher import locator
from facemorpher import aligner
from facemorpher import warper
from facemorpher import blender
from facemorpher import plotter
from facemorpher import videoer
def verify_args(args):
if args['--images'] is None:
vali... | null |
3,426 | from docopt import docopt
import os
import numpy as np
import cv2
from facemorpher import locator
from facemorpher import aligner
from facemorpher import warper
from facemorpher import blender
from facemorpher import plotter
from facemorpher import videoer
def list_imgpaths(images_folder=None, src_image=None, dest_ima... | null |
3,427 | from docopt import docopt
import os
import numpy as np
import cv2
from facemorpher import locator
from facemorpher import aligner
from facemorpher import warper
from facemorpher import blender
from facemorpher import plotter
from facemorpher import videoer
def load_valid_image_points(imgpaths, size):
for path in imgp... | Create a morph sequence from multiple images in imgpaths :param imgpaths: array or generator of image paths |
3,428 | from docopt import docopt
import os
import cv2
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from facemorpher import locator
from facemorpher import aligner
from facemorpher import warper
from facemorpher import blender
from facemorpher import plotter
def list_imgpaths(imgfolder):... | null |
3,429 | from docopt import docopt
import os
import cv2
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from facemorpher import locator
from facemorpher import aligner
from facemorpher import warper
from facemorpher import blender
from facemorpher import plotter
def sharpen(img):
blured = ... | null |
3,430 | from docopt import docopt
import os
import cv2
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from facemorpher import locator
from facemorpher import aligner
from facemorpher import warper
from facemorpher import blender
from facemorpher import plotter
def load_image_points(path, si... | null |
3,431 | import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import os.path
import numpy as np
import cv2
def bgr2rgb(img):
# OpenCV's BGR to RGB
rgb = np.copy(img)
rgb[..., 0], rgb[..., 2] = img[..., 2], img[..., 0]
return rgb | null |
3,432 | import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import os.path
import numpy as np
import cv2
def check_do_plot(func):
def inner(self, *args, **kwargs):
if self.do_plot:
func(self, *args, **kwargs)
return inner | null |
3,433 | import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import os.path
import numpy as np
import cv2
def check_do_save(func):
def inner(self, *args, **kwargs):
if self.do_save:
func(self, *args, **kwargs)
return inner | null |
3,434 | import os
import platform
import torch
import torch.nn.functional as F
from torch.autograd import Function
from torch.utils.cpp_extension import load, _import_module_from_library
if platform.system() == 'Linux' and torch.cuda.is_available():
module_path = os.path.dirname(__file__)
upfirdn2d_op = load(
'... | null |
3,435 | import os
import platform
import torch
from torch import nn
import torch.nn.functional as F
from torch.autograd import Function
from torch.utils.cpp_extension import load, _import_module_from_library
if platform.system() == 'Linux' and torch.cuda.is_available():
module_path = os.path.dirname(__file__)
fused = l... | null |
3,436 | import math
import random
import functools
import operator
import itertools
import torch
from torch import nn
from torch.nn import functional as F
from torch.autograd import Function
from face_model.op import FusedLeakyReLU, fused_leaky_relu, upfirdn2d
def make_kernel(k):
k = torch.tensor(k, dtype=torch.float32)
... | null |
3,437 | import cv2
import numpy as np
from skimage import transform as trans
REFERENCE_FACIAL_POINTS = [
[30.29459953, 51.69630051],
[65.53179932, 51.50139999],
[48.02519989, 71.73660278],
[33.54930115, 92.3655014],
[62.72990036, 92.20410156]
]
def _umeyama(src, dst, estimate_scale=True, scale=1.0):
"""... | null |
3,438 | import time
import torch
import torch.nn as nn
import torchvision.models._utils as _utils
import torchvision.models as models
import torch.nn.functional as F
from torch.autograd import Variable
def conv_bn(inp, oup, stride = 1, leaky = 0):
return nn.Sequential(
nn.Conv2d(inp, oup, 3, stride, 1, bias=False)... | null |
3,439 | import time
import torch
import torch.nn as nn
import torchvision.models._utils as _utils
import torchvision.models as models
import torch.nn.functional as F
from torch.autograd import Variable
def conv_bn_no_relu(inp, oup, stride):
return nn.Sequential(
nn.Conv2d(inp, oup, 3, stride, 1, bias=False),
... | null |
3,440 | import time
import torch
import torch.nn as nn
import torchvision.models._utils as _utils
import torchvision.models as models
import torch.nn.functional as F
from torch.autograd import Variable
def conv_bn1X1(inp, oup, stride, leaky=0):
return nn.Sequential(
nn.Conv2d(inp, oup, 1, stride, padding=0, bias=F... | null |
3,441 | import time
import torch
import torch.nn as nn
import torchvision.models._utils as _utils
import torchvision.models as models
import torch.nn.functional as F
from torch.autograd import Variable
def conv_dw(inp, oup, stride, leaky=0.1):
return nn.Sequential(
nn.Conv2d(inp, inp, 3, stride, 1, groups=inp, bia... | null |
3,442 | import os
import os.path
import sys
import torch
import torch.utils.data as data
import cv2
import numpy as np
The provided code snippet includes necessary dependencies for implementing the `detection_collate` function. Write a Python function `def detection_collate(batch)` to solve the following problem:
Custom colla... | Custom collate fn for dealing with batches of images that have a different number of associated object annotations (bounding boxes). Arguments: batch: (tuple) A tuple of tensor images and lists of annotations Return: A tuple containing: 1) (tensor) batch of images stacked on their 0 dim 2) (list of tensors) annotations... |
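A minimal sketch of such a collate function, assuming each dataset sample is an `(image, annotations)` pair (the real version may accept other layouts):

```python
import torch

def detection_collate(batch):
    """Stack images into one (B, 3, H, W) tensor; annotations stay a list,
    since each image may have a different number of boxes."""
    imgs, targets = [], []
    for img, anno in batch:
        imgs.append(img)
        targets.append(torch.as_tensor(anno, dtype=torch.float32))  # (num_boxes, 5)
    return torch.stack(imgs, 0), targets
```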
3,443 | import cv2
import numpy as np
import random
from face_detect.utils.box_utils import matrix_iof
def matrix_iof(a, b):
"""
return iof of a and b, numpy version for data augmentation
"""
lt = np.maximum(a[:, np.newaxis, :2], b[:, :2])
rb = np.minimum(a[:, np.newaxis, 2:], b[:, 2:])
area_i = np.... | null |
3,444 | import cv2
import numpy as np
import random
from face_detect.utils.box_utils import matrix_iof
def _distort(image):
def _convert(image, alpha=1, beta=0):
tmp = image.astype(float) * alpha + beta
tmp[tmp < 0] = 0
tmp[tmp > 255] = 255
image[:] = tmp
image = image.copy()
if ... | null |
3,445 | import cv2
import numpy as np
import random
from face_detect.utils.box_utils import matrix_iof
def _expand(image, boxes, fill, p):
if random.randrange(2):
return image, boxes
height, width, depth = image.shape
scale = random.uniform(1, p)
w = int(scale * width)
h = int(scale * height)
... | null |
3,446 | import cv2
import numpy as np
import random
from face_detect.utils.box_utils import matrix_iof
def _mirror(image, boxes, landms):
_, width, _ = image.shape
if random.randrange(2):
image = image[:, ::-1]
boxes = boxes.copy()
boxes[:, 0::2] = width - boxes[:, 2::-2]
# landm
... | null |
3,447 | import cv2
import numpy as np
import random
from face_detect.utils.box_utils import matrix_iof
def _pad_to_square(image, rgb_mean, pad_image_flag):
if not pad_image_flag:
return image
height, width, _ = image.shape
long_side = max(width, height)
image_t = np.empty((long_side, long_side, 3), dty... | null |
3,448 | import cv2
import numpy as np
import random
from face_detect.utils.box_utils import matrix_iof
def _resize_subtract_mean(image, insize, rgb_mean):
interp_methods = [cv2.INTER_LINEAR, cv2.INTER_CUBIC, cv2.INTER_AREA, cv2.INTER_NEAREST, cv2.INTER_LANCZOS4]
interp_method = interp_methods[random.randrange(5)]
... | null |
3,449 | import numpy as np
The provided code snippet includes necessary dependencies for implementing the `py_cpu_nms` function. Write a Python function `def py_cpu_nms(dets, thresh)` to solve the following problem:
Pure Python NMS baseline.
Here is the function:
def py_cpu_nms(dets, thresh):
"""Pure Python NMS baseline... | Pure Python NMS baseline. |
3,450 | import torch
import numpy as np
The provided code snippet includes necessary dependencies for implementing the `center_size` function. Write a Python function `def center_size(boxes)` to solve the following problem:
Convert prior_boxes to (cx, cy, w, h) representation for comparison to center-size form ground truth da... | Convert prior_boxes to (cx, cy, w, h) representation for comparison to center-size form ground truth data. Args: boxes: (tensor) point_form boxes Return: boxes: (tensor) Converted (cx, cy, w, h) form of boxes. |
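The conversion is a two-liner in the SSD-style box utilities; a sketch:

```python
import torch

def center_size(boxes):
    """Convert (xmin, ymin, xmax, ymax) boxes to (cx, cy, w, h) form."""
    return torch.cat(((boxes[:, 2:] + boxes[:, :2]) / 2,  # center
                      boxes[:, 2:] - boxes[:, :2]), 1)    # width, height
```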
3,451 | import torch
import numpy as np
The provided code snippet includes necessary dependencies for implementing the `matrix_iou` function. Write a Python function `def matrix_iou(a, b)` to solve the following problem:
return iou of a and b, numpy version for data augmentation
Here is the function:
def matrix_iou(a, b):... | return iou of a and b, numpy version for data augmentation |
3,452 | import torch
import numpy as np
def point_form(boxes):
""" Convert prior_boxes to (xmin, ymin, xmax, ymax)
representation for comparison to point form ground truth data.
Args:
boxes: (tensor) center-size default boxes from priorbox layers.
Return:
boxes: (tensor) Converted xmin, ymin, xm... | Match each prior box with the ground truth box of the highest jaccard overlap, encode the bounding boxes, then return the matched indices corresponding to both confidence and location preds. Args: threshold: (float) The overlap threshold used when matching boxes. truths: (tensor) Ground truth boxes, Shape: [num_obj, 4].... |
3,453 | import torch
import numpy as np
The provided code snippet includes necessary dependencies for implementing the `decode` function. Write a Python function `def decode(loc, priors, variances)` to solve the following problem:
Decode locations from predictions using priors to undo the encoding we did for offset regression... | Decode locations from predictions using priors to undo the encoding we did for offset regression at train time. Args: loc (tensor): location predictions for loc layers, Shape: [num_priors,4] priors (tensor): Prior boxes in center-offset form. Shape: [num_priors,4]. variances: (list[float]) Variances of priorboxes Retur... |
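A sketch of the standard SSD-style `decode` the docstring describes (offsets scaled by `variances`, then center-size converted back to corner form):

```python
import torch

def decode(loc, priors, variances):
    """Undo the offset encoding: loc (num_priors, 4) + priors -> corner-form boxes."""
    boxes = torch.cat((
        priors[:, :2] + loc[:, :2] * variances[0] * priors[:, 2:],  # predicted centers
        priors[:, 2:] * torch.exp(loc[:, 2:] * variances[1])), 1)   # predicted sizes
    boxes[:, :2] -= boxes[:, 2:] / 2  # (cx, cy) -> (xmin, ymin)
    boxes[:, 2:] += boxes[:, :2]      # (w, h)  -> (xmax, ymax)
    return boxes
```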
3,454 | import torch
import numpy as np
The provided code snippet includes necessary dependencies for implementing the `decode_landm` function. Write a Python function `def decode_landm(pre, priors, variances)` to solve the following problem:
Decode landm from predictions using priors to undo the encoding we did for offset re... | Decode landm from predictions using priors to undo the encoding we did for offset regression at train time. Args: pre (tensor): landm predictions for loc layers, Shape: [num_priors,10] priors (tensor): Prior boxes in center-offset form. Shape: [num_priors,4]. variances: (list[float]) Variances of priorboxes Return: dec... |
3,455 | import torch
import numpy as np
The provided code snippet includes necessary dependencies for implementing the `log_sum_exp` function. Write a Python function `def log_sum_exp(x)` to solve the following problem:
Utility function for computing log_sum_exp. This will be used to determine unaveraged conf... | Utility function for computing log_sum_exp. This will be used to determine unaveraged confidence loss across all examples in a batch. Args: x (Variable(tensor)): conf_preds from conf layers |
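A sketch of the usual numerically stable form (subtract the max before exponentiating, add it back after the log):

```python
import torch

def log_sum_exp(x):
    """Stable log(sum(exp(x))) over the class dimension of conf_preds."""
    x_max = x.data.max()
    return torch.log(torch.sum(torch.exp(x - x_max), 1, keepdim=True)) + x_max
```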
3,456 | import torch
import numpy as np
The provided code snippet includes necessary dependencies for implementing the `nms` function. Write a Python function `def nms(boxes, scores, overlap=0.5, top_k=200)` to solve the following problem:
Apply non-maximum suppression at test time to avoid detecting too many overlapping boun... | Apply non-maximum suppression at test time to avoid detecting too many overlapping bounding boxes for a given object. Args: boxes: (tensor) The location preds for the img, Shape: [num_priors,4]. scores: (tensor) The class predscores for the img, Shape:[num_priors]. overlap: (float) The overlap thresh for suppressing un... |
3,457 | from typing import Tuple, List, Union, Iterable
import numpy as np
import torch
import torch.nn.functional as F
from transformers import PreTrainedTokenizer
from transformers import logging
from transformers.generation import LogitsProcessor
BatchTokensType = List[List[int]]
def pad_batch(batch: BatchTokensType, pad_i... | null |
3,458 | from typing import Tuple, List, Union, Iterable
import numpy as np
import torch
import torch.nn.functional as F
from transformers import PreTrainedTokenizer
from transformers import logging
from transformers.generation import LogitsProcessor
def get_ltor_masks_and_position_ids(
data,
eod_token,
reset_positi... | Generate batch from context tokens. |
3,459 | from typing import Tuple, List, Union, Iterable
import numpy as np
import torch
import torch.nn.functional as F
from transformers import PreTrainedTokenizer
from transformers import logging
from transformers.generation import LogitsProcessor
def get_stop_words_ids(chat_format, tokenizer):
if chat_format == "raw":
... | null |
3,460 | from typing import Tuple, List, Union, Iterable
import numpy as np
import torch
import torch.nn.functional as F
from transformers import PreTrainedTokenizer
from transformers import logging
from transformers.generation import LogitsProcessor
def make_context(
tokenizer: PreTrainedTokenizer,
query: str,
his... | null |
3,461 | from typing import Tuple, List, Union, Iterable
import numpy as np
import torch
import torch.nn.functional as F
from transformers import PreTrainedTokenizer
from transformers import logging
from transformers.generation import LogitsProcessor
TokensType = List[int]
def _decode_default(
tokens: List[int],
*,
... | null |
3,462 | from typing import Tuple, List, Union, Iterable
import numpy as np
import torch
import torch.nn.functional as F
from transformers import PreTrainedTokenizer
from transformers import logging
from transformers.generation import LogitsProcessor
The provided code snippet includes necessary dependencies for implementing th... | This function has been mostly taken from huggingface conversational ai code at https://medium.com/huggingface/how-to-build-a-state-of-the-art- conversational-ai-with-transfer-learning-2d818ac26313 |
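The function name is truncated, but the cited Hugging Face post defines top-k/top-p (nucleus) logit filtering. A sketch of that single-example version (`filter_value` replaces the logits of discarded tokens):

```python
import torch
import torch.nn.functional as F

def top_k_top_p_filtering(logits, top_k=0, top_p=0.0, filter_value=-float('Inf')):
    """Filter a 1-D logits vector with top-k and/or nucleus (top-p) sampling."""
    top_k = min(top_k, logits.size(-1))
    if top_k > 0:
        # drop everything below the k-th largest logit
        indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]
        logits[indices_to_remove] = filter_value
    if top_p > 0.0:
        sorted_logits, sorted_indices = torch.sort(logits, descending=True)
        cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
        sorted_indices_to_remove = cumulative_probs > top_p
        # shift right so the first token past the threshold is still kept
        sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
        sorted_indices_to_remove[..., 0] = 0
        logits[sorted_indices[sorted_indices_to_remove]] = filter_value
    return logits
```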
3,463 | from typing import Tuple, List, Union, Iterable
import numpy as np
import torch
import torch.nn.functional as F
from transformers import PreTrainedTokenizer
from transformers import logging
from transformers.generation import LogitsProcessor
def switch(val1, val2, boolean):
boolean = boolean.type_as(val1)
retu... | null |
3,464 | import importlib
import math
from typing import TYPE_CHECKING, Optional, Tuple, Union, Callable, List, Any, Generator
import torch
import torch.nn.functional as F
import torch.utils.checkpoint
from torch.cuda.amp import autocast
from torch.nn import CrossEntropyLoss
from transformers import PreTrainedTokenizer, Generat... | Make causal mask used for bi-directional self-attention. |
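A simplified sketch of such a causal-mask helper (additive-mask convention assumed; the transformers helper also handles a `past_key_values_length` offset):

```python
import torch

def make_causal_mask(seq_len, dtype=torch.float32):
    """Additive causal mask: 0 on/below the diagonal, dtype-min (~ -inf) above."""
    mask = torch.full((seq_len, seq_len), torch.finfo(dtype).min, dtype=dtype)
    cond = torch.arange(seq_len)
    mask.masked_fill_(cond < (cond + 1).view(seq_len, 1), 0)  # j <= i -> visible
    return mask[None, None, :, :]  # (1, 1, seq_len, seq_len), broadcast over batch/heads
```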
3,465 | import importlib
import math
from typing import TYPE_CHECKING, Optional, Tuple, Union, Callable, List, Any, Generator
import torch
import torch.nn.functional as F
import torch.utils.checkpoint
from torch.cuda.amp import autocast
from torch.nn import CrossEntropyLoss
from transformers import PreTrainedTokenizer, Generat... | Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`. |