| id | prompt | docstring |
|---|---|---|
15,549 | import torch.cuda as cuda
import torch.nn as nn
import torch
import collections
from torch.nn.parallel._functions import Gather
def user_scattered_collate(batch):
return batch | null |
15,550 | import torch.cuda as cuda
import torch.nn as nn
import torch
import collections
from torch.nn.parallel._functions import Gather
def async_copy_to(obj, dev, main_stream=None):
if torch.is_tensor(obj):
v = obj.cuda(dev, non_blocking=True)
if main_stream is not None:
v.data.record_stream(main_stream)
return v
    elif isinstance(obj, collections.abc.Mapping):
        return {k: async_copy_to(o, dev, main_stream) for k, o in obj.items()}
    elif isinstance(obj, collections.abc.Sequence):
return [async_copy_to(o, dev, main_stream) for o in obj]
else:
return obj
def _async_copy(inputs, device_ids):
nr_devs = len(device_ids)
assert type(inputs) in (tuple, list)
assert len(inputs) == nr_devs
outputs = []
for i, dev in zip(inputs, device_ids):
with cuda.device(dev):
outputs.append(async_copy_to(i, dev))
return tuple(outputs) | null |
15,551 | import torch.cuda as cuda
import torch.nn as nn
import torch
import collections
from torch.nn.parallel._functions import Gather
def async_copy_to(obj, dev, main_stream=None):
if torch.is_tensor(obj):
v = obj.cuda(dev, non_blocking=True)
if main_stream is not None:
v.data.record_stream(main_stream)
return v
    elif isinstance(obj, collections.abc.Mapping):
        return {k: async_copy_to(o, dev, main_stream) for k, o in obj.items()}
    elif isinstance(obj, collections.abc.Sequence):
return [async_copy_to(o, dev, main_stream) for o in obj]
else:
return obj
_streams = None  # module-level cache: one background stream per device
def _get_stream(device):
"""Gets a background stream for copying between CPU and GPU"""
global _streams
if device == -1:
return None
if _streams is None:
_streams = [None] * cuda.device_count()
if _streams[device] is None: _streams[device] = cuda.Stream(device)
return _streams[device]
def _async_copy_stream(inputs, device_ids):
nr_devs = len(device_ids)
assert type(inputs) in (tuple, list)
assert len(inputs) == nr_devs
outputs = []
streams = [_get_stream(d) for d in device_ids]
for i, dev, stream in zip(inputs, device_ids, streams):
with cuda.device(dev):
main_stream = cuda.current_stream()
with cuda.stream(stream):
outputs.append(async_copy_to(i, dev, main_stream=main_stream))
main_stream.wait_stream(stream)
return outputs | null |
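A minimal usage sketch for the stream-based copy above (illustrative, not part of the original source), guarded for machines with at least two CUDA devices:
if cuda.device_count() >= 2:
    # one CPU batch per target GPU; each copy runs on its device's background stream
    cpu_batches = [torch.randn(4, 3, 224, 224) for _ in range(2)]
    gpu_batches = _async_copy_stream(cpu_batches, device_ids=[0, 1])
    assert gpu_batches[0].device.index == 0 and gpu_batches[1].device.index == 1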
15,552 | import torch
import torch.multiprocessing as multiprocessing
from torch._C import _set_worker_signal_handlers, \
_remove_worker_pids, _error_if_any_worker_fails
try:
from torch._C import _set_worker_pids
except ImportError:
from torch._C import _update_worker_pids as _set_worker_pids
from .sampler import SequentialSampler, RandomSampler, BatchSampler
import signal
import collections
import re
import sys
import threading
import traceback
from torch._six import string_classes, int_classes
import numpy as np
if sys.version_info[0] == 2:
import Queue as queue
else:
import queue
class ExceptionWrapper(object):
r"Wraps an exception plus traceback to communicate across threads"
def __init__(self, exc_info):
self.exc_type = exc_info[0]
self.exc_msg = "".join(traceback.format_exception(*exc_info))
_use_shared_memory = False
def _worker_loop(dataset, index_queue, data_queue, collate_fn, seed, init_fn, worker_id):
global _use_shared_memory
_use_shared_memory = True
    # Initialize C side signal handlers for SIGBUS and SIGSEGV. Python signal
# module's handlers are executed after Python returns from C low-level
# handlers, likely when the same fatal signal happened again already.
# https://docs.python.org/3/library/signal.html Sec. 18.8.1.1
_set_worker_signal_handlers()
torch.set_num_threads(1)
torch.manual_seed(seed)
np.random.seed(seed)
if init_fn is not None:
init_fn(worker_id)
while True:
r = index_queue.get()
if r is None:
break
idx, batch_indices = r
try:
samples = collate_fn([dataset[i] for i in batch_indices])
except Exception:
data_queue.put((idx, ExceptionWrapper(sys.exc_info())))
else:
data_queue.put((idx, samples)) | null |
15,553 | import torch
import torch.multiprocessing as multiprocessing
from torch._C import _set_worker_signal_handlers, \
_remove_worker_pids, _error_if_any_worker_fails
try:
from torch._C import _set_worker_pids
except ImportError:
from torch._C import _update_worker_pids as _set_worker_pids
from .sampler import SequentialSampler, RandomSampler, BatchSampler
import signal
import collections
import re
import sys
import threading
import traceback
from torch._six import string_classes, int_classes
import numpy as np
if sys.version_info[0] == 2:
import Queue as queue
else:
import queue
class ExceptionWrapper(object):
r"Wraps an exception plus traceback to communicate across threads"
def __init__(self, exc_info):
self.exc_type = exc_info[0]
self.exc_msg = "".join(traceback.format_exception(*exc_info))
def pin_memory_batch(batch):
if torch.is_tensor(batch):
return batch.pin_memory()
elif isinstance(batch, string_classes):
return batch
    elif isinstance(batch, collections.abc.Mapping):
        return {k: pin_memory_batch(sample) for k, sample in batch.items()}
    elif isinstance(batch, collections.abc.Sequence):
return [pin_memory_batch(sample) for sample in batch]
else:
return batch
def _worker_manager_loop(in_queue, out_queue, done_event, pin_memory, device_id):
if pin_memory:
torch.cuda.set_device(device_id)
while True:
try:
r = in_queue.get()
except Exception:
if done_event.is_set():
return
raise
if r is None:
break
if isinstance(r[1], ExceptionWrapper):
out_queue.put(r)
continue
idx, batch = r
try:
if pin_memory:
batch = pin_memory_batch(batch)
except Exception:
out_queue.put((idx, ExceptionWrapper(sys.exc_info())))
else:
out_queue.put((idx, batch)) | null |
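A small usage sketch for pin_memory_batch (illustrative); pinning requires a CUDA build of PyTorch:
nested = {'image': torch.randn(8, 3, 32, 32), 'label': list(range(8))}
pinned = pin_memory_batch(nested)
assert pinned['image'].is_pinned()        # pinned host memory enables async non_blocking copies
assert pinned['label'] == list(range(8))  # non-tensor leaves pass through unchanged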
15,554 | import torch
import torch.multiprocessing as multiprocessing
from torch._C import _set_worker_signal_handlers, \
_remove_worker_pids, _error_if_any_worker_fails
try:
from torch._C import _set_worker_pids
except ImportError:
from torch._C import _update_worker_pids as _set_worker_pids
from .sampler import SequentialSampler, RandomSampler, BatchSampler
import signal
import collections
import re
import sys
import threading
import traceback
from torch._six import string_classes, int_classes
import numpy as np
_use_shared_memory = False
numpy_type_map = {
'float64': torch.DoubleTensor,
'float32': torch.FloatTensor,
'float16': torch.HalfTensor,
'int64': torch.LongTensor,
'int32': torch.IntTensor,
'int16': torch.ShortTensor,
'int8': torch.CharTensor,
'uint8': torch.ByteTensor,
}
The provided code snippet includes necessary dependencies for implementing the `default_collate` function. Write a Python function `def default_collate(batch)` to solve the following problem:
Puts each data field into a tensor with outer dimension batch size
Here is the function:
def default_collate(batch):
"Puts each data field into a tensor with outer dimension batch size"
error_msg = "batch must contain tensors, numbers, dicts or lists; found {}"
elem_type = type(batch[0])
if torch.is_tensor(batch[0]):
out = None
if _use_shared_memory:
# If we're in a background process, concatenate directly into a
# shared memory tensor to avoid an extra copy
numel = sum([x.numel() for x in batch])
storage = batch[0].storage()._new_shared(numel)
out = batch[0].new(storage)
return torch.stack(batch, 0, out=out)
elif elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \
and elem_type.__name__ != 'string_':
elem = batch[0]
if elem_type.__name__ == 'ndarray':
# array of string classes and object
if re.search('[SaUO]', elem.dtype.str) is not None:
raise TypeError(error_msg.format(elem.dtype))
return torch.stack([torch.from_numpy(b) for b in batch], 0)
if elem.shape == (): # scalars
py_type = float if elem.dtype.name.startswith('float') else int
return numpy_type_map[elem.dtype.name](list(map(py_type, batch)))
elif isinstance(batch[0], int_classes):
return torch.LongTensor(batch)
elif isinstance(batch[0], float):
return torch.DoubleTensor(batch)
elif isinstance(batch[0], string_classes):
return batch
    elif isinstance(batch[0], collections.abc.Mapping):
        return {key: default_collate([d[key] for d in batch]) for key in batch[0]}
    elif isinstance(batch[0], collections.abc.Sequence):
transposed = zip(*batch)
return [default_collate(samples) for samples in transposed]
raise TypeError((error_msg.format(type(batch[0])))) | Puts each data field into a tensor with outer dimension batch size |
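A usage example for default_collate (added for illustration), collating a batch of dict samples:
samples = [{'x': torch.randn(3), 'y': 1}, {'x': torch.randn(3), 'y': 0}]
collated = default_collate(samples)
assert collated['x'].shape == (2, 3)     # tensors are stacked along a new outer dim
assert collated['y'].tolist() == [1, 0]  # Python ints become a LongTensor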
15,555 | import torch
import torch.multiprocessing as multiprocessing
from torch._C import _set_worker_signal_handlers, \
_remove_worker_pids, _error_if_any_worker_fails
from .sampler import SequentialSampler, RandomSampler, BatchSampler
import signal
import collections
import re
import sys
import threading
import traceback
from torch._six import string_classes, int_classes
import numpy as np
if sys.version_info[0] == 2:
import Queue as queue
else:
import queue
_SIGCHLD_handler_set = False
def _set_SIGCHLD_handler():
# Windows doesn't support SIGCHLD handler
if sys.platform == 'win32':
return
# can't set signal in child threads
if not isinstance(threading.current_thread(), threading._MainThread):
return
global _SIGCHLD_handler_set
if _SIGCHLD_handler_set:
return
previous_handler = signal.getsignal(signal.SIGCHLD)
if not callable(previous_handler):
previous_handler = None
def handler(signum, frame):
# This following call uses `waitid` with WNOHANG from C side. Therefore,
# Python can still get and update the process status successfully.
_error_if_any_worker_fails()
if previous_handler is not None:
previous_handler(signum, frame)
signal.signal(signal.SIGCHLD, handler)
_SIGCHLD_handler_set = True | null |
15,556 | import bisect
import warnings
from torch._utils import _accumulate
from torch import randperm
from torch.utils.data import Dataset
class Subset(Dataset):
def __init__(self, dataset, indices):
self.dataset = dataset
self.indices = indices
def __getitem__(self, idx):
return self.dataset[self.indices[idx]]
def __len__(self):
return len(self.indices)
The provided code snippet includes necessary dependencies for implementing the `random_split` function. Write a Python function `def random_split(dataset, lengths)` to solve the following problem:
Randomly split a dataset into non-overlapping new datasets of given lengths. Arguments: dataset (Dataset): Dataset to be split lengths (iterable): lengths of splits to be produced
Here is the function:
def random_split(dataset, lengths):
"""
    Randomly split a dataset into non-overlapping new datasets of given lengths.
Arguments:
dataset (Dataset): Dataset to be split
lengths (iterable): lengths of splits to be produced
"""
if sum(lengths) != len(dataset):
raise ValueError("Sum of input lengths does not equal the length of the input dataset!")
    indices = randperm(sum(lengths)).tolist()
    return [Subset(dataset, indices[offset - length:offset]) for offset, length in zip(_accumulate(lengths), lengths)] | Randomly split a dataset into non-overlapping new datasets of given lengths. Arguments: dataset (Dataset): Dataset to be split lengths (iterable): lengths of splits to be produced |
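A short usage sketch (illustrative); any indexable object with __len__ works here, since Subset only needs __getitem__:
data = list(range(10))
train, val = random_split(data, [8, 2])
assert len(train) == 8 and len(val) == 2
assert sorted(list(train) + list(val)) == list(range(10))  # splits cover the data without overlap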
15,557 | import torch
from torch.autograd import Variable
import numpy as np
import collections
def as_variable(obj):
if isinstance(obj, Variable):
return obj
    if isinstance(obj, collections.abc.Sequence):
        return [as_variable(v) for v in obj]
    elif isinstance(obj, collections.abc.Mapping):
return {k: as_variable(v) for k, v in obj.items()}
else:
return Variable(obj) | null |
15,558 | import torch
from torch.autograd import Variable
import numpy as np
import collections
def as_numpy(obj):
    if isinstance(obj, collections.abc.Sequence):
        return [as_numpy(v) for v in obj]
    elif isinstance(obj, collections.abc.Mapping):
return {k: as_numpy(v) for k, v in obj.items()}
elif isinstance(obj, Variable):
return obj.data.cpu().numpy()
elif torch.is_tensor(obj):
return obj.cpu().numpy()
else:
return np.array(obj) | null |
15,559 | import torch
from torch.autograd import Variable
import numpy as np
import collections
def mark_volatile(obj):
if torch.is_tensor(obj):
obj = Variable(obj)
if isinstance(obj, Variable):
obj.no_grad = True
return obj
    elif isinstance(obj, collections.abc.Mapping):
        return {k: mark_volatile(o) for k, o in obj.items()}
    elif isinstance(obj, collections.abc.Sequence):
return [mark_volatile(o) for o in obj]
else:
return obj | null |
15,560 | import os
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.functional as F
from scipy.io import loadmat
from torch.nn.modules import BatchNorm2d
from . import resnet
from . import mobilenet
def conv3x3_bn_relu(in_planes, out_planes, stride=1):
return nn.Sequential(
nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False),
BatchNorm2d(out_planes),
nn.ReLU(inplace=True),
) | null |
15,561 | import math
import torch.nn as nn
from torch.nn import BatchNorm2d
from .utils import load_url
The provided code snippet includes necessary dependencies for implementing the `conv3x3` function. Write a Python function `def conv3x3(in_planes, out_planes, stride=1)` to solve the following problem:
3x3 convolution with padding
Here is the function:
def conv3x3(in_planes, out_planes, stride=1):
"3x3 convolution with padding"
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False) | 3x3 convolution with padding |
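A quick shape check for conv3x3 (illustrative), assuming torch is imported alongside torch.nn:
import torch
layer = conv3x3(64, 128, stride=2)
out = layer(torch.randn(1, 64, 56, 56))
assert out.shape == (1, 128, 28, 28)  # padding=1 halves the spatial dims exactly at stride 2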
15,562 | import math
import torch.nn as nn
from torch.nn import BatchNorm2d
from .utils import load_url
model_urls = {
'resnet50': 'http://sceneparsing.csail.mit.edu/model/pretrained_resnet/resnet50-imagenet.pth',
}
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.bn3 = BatchNorm2d(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, layers, num_classes=1000):
self.inplanes = 128
super(ResNet, self).__init__()
self.conv1 = conv3x3(3, 64, stride=2)
self.bn1 = BatchNorm2d(64)
self.relu1 = nn.ReLU(inplace=True)
self.conv2 = conv3x3(64, 64)
self.bn2 = BatchNorm2d(64)
self.relu2 = nn.ReLU(inplace=True)
self.conv3 = conv3x3(64, 128)
self.bn3 = BatchNorm2d(128)
self.relu3 = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
self.avgpool = nn.AvgPool2d(7, stride=1)
self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
x = self.relu1(self.bn1(self.conv1(x)))
x = self.relu2(self.bn2(self.conv2(x)))
x = self.relu3(self.bn3(self.conv3(x)))
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
import os
import sys
import torch
from urllib.request import urlretrieve
def load_url(url, model_dir='./pretrained', map_location=None):
if not os.path.exists(model_dir):
os.makedirs(model_dir)
filename = url.split('/')[-1]
cached_file = os.path.join(model_dir, filename)
if not os.path.exists(cached_file):
sys.stderr.write('Downloading: "{}" to {}\n'.format(url, cached_file))
urlretrieve(url, cached_file)
return torch.load(cached_file, map_location=map_location)
The provided code snippet includes necessary dependencies for implementing the `resnet50` function. Write a Python function `def resnet50(pretrained=False, **kwargs)` to solve the following problem:
Constructs a ResNet-50 model. Args: pretrained (bool): If True, returns a model pre-trained on ImageNet
Here is the function:
def resnet50(pretrained=False, **kwargs):
"""Constructs a ResNet-50 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
if pretrained:
model.load_state_dict(load_url(model_urls['resnet50']), strict=False)
return model | Constructs a ResNet-50 model. Args: pretrained (bool): If True, returns a model pre-trained on ImageNet |
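A usage sketch for resnet50 (added for illustration), assuming torch and the conv3x3 helper from the previous snippet are in scope:
import torch
model = resnet50(pretrained=False, num_classes=10)
logits = model(torch.randn(2, 3, 224, 224))
assert logits.shape == (2, 10)  # 2048-d pooled features feed the final fc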
15,563 | import math
import torch.nn as nn
from torch.nn import BatchNorm2d
from .utils import load_url
model_urls = {
'resnet50': 'http://sceneparsing.csail.mit.edu/model/pretrained_resnet/resnet50-imagenet.pth',
}
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, layers, num_classes=1000):
self.inplanes = 128
super(ResNet, self).__init__()
self.conv1 = conv3x3(3, 64, stride=2)
self.bn1 = BatchNorm2d(64)
self.relu1 = nn.ReLU(inplace=True)
self.conv2 = conv3x3(64, 64)
self.bn2 = BatchNorm2d(64)
self.relu2 = nn.ReLU(inplace=True)
self.conv3 = conv3x3(64, 128)
self.bn3 = BatchNorm2d(128)
self.relu3 = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
self.avgpool = nn.AvgPool2d(7, stride=1)
self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
x = self.relu1(self.bn1(self.conv1(x)))
x = self.relu2(self.bn2(self.conv2(x)))
x = self.relu3(self.bn3(self.conv3(x)))
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
import os
import sys
import torch
from urllib.request import urlretrieve
def load_url(url, model_dir='./pretrained', map_location=None):
if not os.path.exists(model_dir):
os.makedirs(model_dir)
filename = url.split('/')[-1]
cached_file = os.path.join(model_dir, filename)
if not os.path.exists(cached_file):
sys.stderr.write('Downloading: "{}" to {}\n'.format(url, cached_file))
urlretrieve(url, cached_file)
return torch.load(cached_file, map_location=map_location)
The provided code snippet includes necessary dependencies for implementing the `resnet18` function. Write a Python function `def resnet18(pretrained=False, **kwargs)` to solve the following problem:
Constructs a ResNet-18 model. Args: pretrained (bool): If True, returns a model pre-trained on ImageNet
Here is the function:
def resnet18(pretrained=False, **kwargs):
"""Constructs a ResNet-18 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
if pretrained:
model.load_state_dict(load_url(model_urls['resnet18']))
return model | Constructs a ResNet-18 model. Args: pretrained (bool): If True, returns a model pre-trained on ImageNet |
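The analogous sanity check for the BasicBlock variant (illustrative); note that model_urls above only defines a 'resnet50' entry, so calling resnet18(pretrained=True) as written would raise a KeyError:
import torch
model = resnet18(pretrained=False, num_classes=5)
assert model(torch.randn(1, 3, 224, 224)).shape == (1, 5)  # expansion=1, so the fc input is 512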
15,564 | import glob
import os
import PIL.Image as Image
import cv2
import numpy as np
import tqdm
import shutil
from saicinpainting.evaluation.utils import load_yaml
def generate_masks_for_img(infile, outmask_pattern, mask_size=200, step=0.5):
inimg = Image.open(infile)
width, height = inimg.size
step_abs = int(mask_size * step)
mask = np.zeros((height, width), dtype='uint8')
mask_i = 0
for start_vertical in range(0, height - step_abs, step_abs):
for start_horizontal in range(0, width - step_abs, step_abs):
mask[start_vertical:start_vertical + mask_size, start_horizontal:start_horizontal + mask_size] = 255
cv2.imwrite(outmask_pattern.format(mask_i), mask)
mask[start_vertical:start_vertical + mask_size, start_horizontal:start_horizontal + mask_size] = 0
mask_i += 1 | null |
15,576 | import torch
import torch.multiprocessing as multiprocessing
from torch._C import _set_worker_signal_handlers, \
_remove_worker_pids, _error_if_any_worker_fails
try:
from torch._C import _set_worker_pids
except ImportError:
from torch._C import _update_worker_pids as _set_worker_pids
from .sampler import SequentialSampler, RandomSampler, BatchSampler
import signal
import collections
import re
import sys
import threading
import traceback
from torch._six import string_classes, int_classes
import numpy as np
if sys.version_info[0] == 2:
import Queue as queue
else:
import queue
class ExceptionWrapper(object):
    r"Wraps an exception plus traceback to communicate across threads"
    def __init__(self, exc_info):
        self.exc_type = exc_info[0]
        self.exc_msg = "".join(traceback.format_exception(*exc_info))
_use_shared_memory = False
def _worker_loop(dataset, index_queue, data_queue, collate_fn, seed, init_fn, worker_id):
global _use_shared_memory
_use_shared_memory = True
    # Initialize C side signal handlers for SIGBUS and SIGSEGV. Python signal
# module's handlers are executed after Python returns from C low-level
# handlers, likely when the same fatal signal happened again already.
# https://docs.python.org/3/library/signal.html Sec. 18.8.1.1
_set_worker_signal_handlers()
torch.set_num_threads(1)
torch.manual_seed(seed)
np.random.seed(seed)
if init_fn is not None:
init_fn(worker_id)
while True:
r = index_queue.get()
if r is None:
break
idx, batch_indices = r
try:
samples = collate_fn([dataset[i] for i in batch_indices])
except Exception:
data_queue.put((idx, ExceptionWrapper(sys.exc_info())))
else:
data_queue.put((idx, samples)) | null |
15,588 | import os
import numpy as np
from pathlib import Path
import sys
import random
from PIL import Image
import numpy as np
import argparse
from functools import partial
import gradio as gr
import gradio.themes.base as ThemeBase
from gradio.themes.utils import colors, fonts, sizes
from openai.error import APIConnectionError
from iGPT.controllers import ConversationBot
import openai
from langchain.llms.openai import OpenAI
def cut_dialogue_history(history_memory, keep_last_n_words=500):
if history_memory is None or len(history_memory) == 0:
return history_memory
tokens = history_memory.split()
n_tokens = len(tokens)
print(f"history_memory:{history_memory}, n_tokens: {n_tokens}")
if n_tokens < keep_last_n_words:
return history_memory
paragraphs = history_memory.split('\n')
last_n_tokens = n_tokens
while last_n_tokens >= keep_last_n_words:
last_n_tokens -= len(paragraphs[0].split(' '))
paragraphs = paragraphs[1:]
return '\n' + '\n'.join(paragraphs) | null |
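A worked example (illustrative): whole leading paragraphs are dropped until fewer than keep_last_n_words whitespace-separated tokens remain:
history = '\n'.join(f'Human: message {i}' for i in range(300))  # 900 tokens, 3 per line
trimmed = cut_dialogue_history(history, keep_last_n_words=50)
assert len(trimmed.split()) < 50
assert trimmed.endswith('message 299')  # the newest lines are kept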
15,589 | import os
import numpy as np
os.environ['CURL_CA_BUNDLE'] = ''
os.environ['MKL_SERVICE_FORCE_INTEL'] = '1'
from pathlib import Path
import sys
import random
from PIL import Image
import numpy as np
import argparse
from functools import partial
import gradio as gr
import gradio.themes.base as ThemeBase
from gradio.themes.utils import colors, fonts, sizes
from openai.error import APIConnectionError
from iGPT.controllers import ConversationBot
import openai
from langchain.llms.openai import OpenAI
os.makedirs('image', exist_ok=True)
def login_with_key(bot, debug, api_key):
# Just for debug
print('===>logging in')
user_state = [{}]
is_error = True
if debug:
user_state = bot.init_agent()
return gr.update(visible=True), gr.update(visible=False), gr.update(visible=False, value=''), user_state
else:
if api_key and len(api_key) > 30:
print(api_key)
os.environ["OPENAI_API_KEY"] = api_key
openai.api_key = api_key
try:
llm = OpenAI(temperature=0)
llm('Hi!')
response = 'Success!'
is_error = False
user_state = bot.init_agent()
except Exception as err:
# gr.update(visible=True)
print(err)
response = 'Incorrect key, please input again'
is_error = True
else:
is_error = True
response = 'Incorrect key, please input again'
return gr.update(visible=not is_error), gr.update(visible=is_error), gr.update(visible=is_error, value=response), user_state | null |
15,590 | import os
import numpy as np
from pathlib import Path
import sys
import random
from PIL import Image
import numpy as np
import argparse
from functools import partial
import gradio as gr
import gradio.themes.base as ThemeBase
from gradio.themes.utils import colors, fonts, sizes
from openai.error import APIConnectionError
from iGPT.controllers import ConversationBot
import openai
from langchain.llms.openai import OpenAI
def change_input_type(flag):
if flag:
print('Using voice input.')
else:
print('Using text input.')
return gr.update(visible=not flag), gr.update(visible=flag) | null |
15,591 | import os
import numpy as np
os.environ['CURL_CA_BUNDLE'] = ''
os.environ['MKL_SERVICE_FORCE_INTEL'] = '1'
from pathlib import Path
import sys
import random
from PIL import Image
import numpy as np
import argparse
from functools import partial
import gradio as gr
import gradio.themes.base as ThemeBase
from gradio.themes.utils import colors, fonts, sizes
from openai.error import APIConnectionError
from iGPT.controllers import ConversationBot
import openai
from langchain.llms.openai import OpenAI
os.makedirs('image', exist_ok=True)
def random_image():
root_path = './assets/images'
img_list = os.listdir(root_path)
img_item = random.sample(img_list, 1)[0]
return Image.open(os.path.join(root_path, img_item)) | null |
15,592 | import os
import numpy as np
os.environ['CURL_CA_BUNDLE'] = ''
os.environ['MKL_SERVICE_FORCE_INTEL'] = '1'
from pathlib import Path
import sys
import random
from PIL import Image
import numpy as np
import argparse
from functools import partial
import gradio as gr
import gradio.themes.base as ThemeBase
from gradio.themes.utils import colors, fonts, sizes
from openai.error import APIConnectionError
from iGPT.controllers import ConversationBot
import openai
from langchain.llms.openai import OpenAI
os.makedirs('image', exist_ok=True)
def random_video():
root_path = './assets/videos'
vid_list = os.listdir(root_path)
vid_item = random.sample(vid_list, 1)[0]
return os.path.join(root_path, vid_item) | null |
15,593 | import os
import numpy as np
os.environ['CURL_CA_BUNDLE'] = ''
os.environ['MKL_SERVICE_FORCE_INTEL'] = '1'
from pathlib import Path
import sys
import random
from PIL import Image
import numpy as np
import argparse
from functools import partial
import gradio as gr
import gradio.themes.base as ThemeBase
from gradio.themes.utils import colors, fonts, sizes
from openai.error import APIConnectionError
from iGPT.controllers import ConversationBot
import openai
from langchain.llms.openai import OpenAI
os.makedirs('image', exist_ok=True)
def random_audio():
root_path = './assets/audio'
aud_list = os.listdir(root_path)
aud_item = random.sample(aud_list, 1)[0]
print(os.path.join(root_path, aud_item))
return os.path.join(root_path, aud_item) | null |
15,594 | import os
import numpy as np
from pathlib import Path
import sys
import random
from PIL import Image
import numpy as np
import argparse
from functools import partial
import gradio as gr
import gradio.themes.base as ThemeBase
from gradio.themes.utils import colors, fonts, sizes
from openai.error import APIConnectionError
from iGPT.controllers import ConversationBot
import openai
from langchain.llms.openai import OpenAI
def add_whiteboard():
# wb = np.ones((1080, 1920, 3), dtype=np.uint8) * 255
wb = np.ones((720, 1280, 3), dtype=np.uint8) * 255
return Image.fromarray(wb) | null |
15,595 | import os
import numpy as np
from pathlib import Path
import sys
import random
from PIL import Image
import numpy as np
import argparse
from functools import partial
import gradio as gr
import gradio.themes.base as ThemeBase
from gradio.themes.utils import colors, fonts, sizes
from openai.error import APIConnectionError
from iGPT.controllers import ConversationBot
import openai
from langchain.llms.openai import OpenAI
def change_max_iter(max_iters):
return gr.update(maximum=max_iters) | null |
15,596 | import inspect
import re
import os
import numpy as np
import uuid
import shutil
import whisper
import torch
import gradio as gr
import imageio
from io import BytesIO
import requests as req
from PIL import Image
from langchain.agents.initialize import initialize_agent
from langchain.agents.tools import Tool
from langchain.chains.conversation.memory import ConversationBufferMemory
from langchain.llms.openai import OpenAI
from ..models import *
from iGPT.models.utils import (gen_new_name, to_image,
seed_everything, add_points_to_image)
from ..models.drag_gan import drag_gan
def cut_dialogue_history(history_memory, keep_last_n_words=500):
if history_memory is None or len(history_memory) == 0:
return history_memory
tokens = history_memory.split()
n_tokens = len(tokens)
print(f"history_memory:{history_memory}, n_tokens: {n_tokens}")
if n_tokens < keep_last_n_words:
return history_memory
paragraphs = history_memory.split('\n')
last_n_tokens = n_tokens
while last_n_tokens >= keep_last_n_words:
last_n_tokens -= len(paragraphs[0].split(' '))
paragraphs = paragraphs[1:]
return '\n' + '\n'.join(paragraphs) | null |
15,597 | from .intern_action import intern_action_b16
from huggingface_hub import hf_hub_download
import torch
import torch.nn as nn
import torchvision.transforms as T
import torch.nn.functional as F
import numpy as np
from .processing import (
GroupNormalize, GroupScale, GroupCenterCrop,
Stack, ToTorchFormatTensor
)
def get_index(num_frames, num_segments=8):
seg_size = float(num_frames - 1) / num_segments
start = int(seg_size / 2)
offsets = np.array([
start + int(np.round(seg_size * idx)) for idx in range(num_segments)
])
return offsets | null |
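A worked example (illustrative): the offsets sample one index near the centre of each of the num_segments equal slices of the clip:
idx = get_index(num_frames=64, num_segments=8)
assert len(idx) == 8
assert idx[0] >= 0 and idx[-1] < 64  # all offsets stay inside the clip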
15,598 | from .intern_action import intern_action_b16
from huggingface_hub import hf_hub_download
import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as T
import torch.nn.functional as F
import numpy as np
from PIL import Image
from .processing import (
GroupNormalize, GroupScale, GroupCenterCrop,
Stack, ToTorchFormatTensor
)
class GroupCenterCrop(object):
def __init__(self, size):
self.worker = torchvision.transforms.CenterCrop(size)
def __call__(self, img_group):
return [self.worker(img) for img in img_group]
class GroupNormalize(object):
def __init__(self, mean, std):
self.mean = mean
self.std = std
def __call__(self, tensor):
rep_mean = self.mean * (tensor.size()[0] // len(self.mean))
rep_std = self.std * (tensor.size()[0] // len(self.std))
# TODO: make efficient
for t, m, s in zip(tensor, rep_mean, rep_std):
t.sub_(m).div_(s)
return tensor
class GroupScale(object):
""" Rescales the input PIL.Image to the given 'size'.
'size' will be the size of the smaller edge.
For example, if height > width, then image will be
rescaled to (size * height / width, size)
size: size of the smaller edge
interpolation: Default: PIL.Image.BILINEAR
"""
def __init__(self, size, interpolation=Image.BILINEAR):
self.worker = torchvision.transforms.Resize(size, interpolation)
def __call__(self, img_group):
return [self.worker(img) for img in img_group]
class Stack(object):
def __init__(self, roll=False):
self.roll = roll
def __call__(self, img_group):
if img_group[0].mode == 'L':
return np.concatenate([np.expand_dims(x, 2)
for x in img_group], axis=2)
elif img_group[0].mode == 'RGB':
if self.roll:
return np.concatenate([np.array(x)[:, :, ::-1]
for x in img_group], axis=2)
else:
#print(np.concatenate(img_group, axis=2).shape)
# print(img_group[0].shape)
return np.concatenate(img_group, axis=2)
class ToTorchFormatTensor(object):
""" Converts a PIL.Image (RGB) or numpy.ndarray (H x W x C) in the range [0, 255]
to a torch.FloatTensor of shape (C x H x W) in the range [0.0, 1.0] """
def __init__(self, div=True):
self.div = div
def __call__(self, pic):
if isinstance(pic, np.ndarray):
# handle numpy array
img = torch.from_numpy(pic).permute(2, 0, 1).contiguous()
else:
# handle PIL Image
img = torch.ByteTensor(
torch.ByteStorage.from_buffer(
pic.tobytes()))
img = img.view(pic.size[1], pic.size[0], len(pic.mode))
# put it from HWC to CHW format
# yikes, this transpose takes 80% of the loading time/CPU
img = img.transpose(0, 1).transpose(0, 2).contiguous()
return img.float().div(255) if self.div else img.float()
def transform_action():
# transform
crop_size = 224
scale_size = 256
input_mean = [0.485, 0.456, 0.406]
input_std = [0.229, 0.224, 0.225]
return T.Compose([
# T.ToPILImage(),
GroupScale(int(scale_size)),
GroupCenterCrop(crop_size),
Stack(),
ToTorchFormatTensor(),
GroupNormalize(input_mean, input_std)
]) | null |
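A usage sketch for transform_action (illustrative), assuming a torchvision version that still accepts PIL integer interpolation constants; Stack concatenates the 8 RGB frames along the channel axis, so the result is a normalized (24, 224, 224) float tensor:
from PIL import Image
transform = transform_action()
frames = [Image.new('RGB', (320, 240)) for _ in range(8)]
clip = transform(frames)
assert clip.shape == (24, 224, 224)  # 8 frames x 3 channels after scale, crop, stack, normalize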
15,599 | from .intern_action import intern_action_b16
from huggingface_hub import hf_hub_download
import torch
import torch.nn as nn
import torchvision.transforms as T
import torch.nn.functional as F
import numpy as np
from .processing import (
GroupNormalize, GroupScale, GroupCenterCrop,
Stack, ToTorchFormatTensor
)
class Intern_Action(nn.Module):
def __init__(self, model):
super().__init__()
self.backbone = model
def forward(self, x):
return self.backbone(x)
kinetics_classnames = {
"0": "riding a bike",
"1": "marching",
"2": "dodgeball",
"3": "playing cymbals",
"4": "checking tires",
"5": "roller skating",
"6": "tasting beer",
"7": "clapping",
"8": "drawing",
"9": "juggling fire",
"10": "bobsledding",
"11": "petting animal (not cat)",
"12": "spray painting",
"13": "training dog",
"14": "eating watermelon",
"15": "building cabinet",
"16": "applauding",
"17": "playing harp",
"18": "balloon blowing",
"19": "sled dog racing",
"20": "wrestling",
"21": "pole vault",
"22": "hurling (sport)",
"23": "riding scooter",
"24": "shearing sheep",
"25": "sweeping floor",
"26": "eating carrots",
"27": "skateboarding",
"28": "dunking basketball",
"29": "disc golfing",
"30": "eating spaghetti",
"31": "playing flute",
"32": "riding mechanical bull",
"33": "making sushi",
"34": "trapezing",
"35": "picking fruit",
"36": "stretching leg",
"37": "playing ukulele",
"38": "tying tie",
"39": "skydiving",
"40": "playing cello",
"41": "jumping into pool",
"42": "shooting goal (soccer)",
"43": "trimming trees",
"44": "bookbinding",
"45": "ski jumping",
"46": "walking the dog",
"47": "riding unicycle",
"48": "shaving head",
"49": "hopscotch",
"50": "playing piano",
"51": "parasailing",
"52": "bartending",
"53": "kicking field goal",
"54": "finger snapping",
"55": "dining",
"56": "yawning",
"57": "peeling potatoes",
"58": "canoeing or kayaking",
"59": "front raises",
"60": "laughing",
"61": "dancing macarena",
"62": "digging",
"63": "reading newspaper",
"64": "hitting baseball",
"65": "clay pottery making",
"66": "exercising with an exercise ball",
"67": "playing saxophone",
"68": "shooting basketball",
"69": "washing hair",
"70": "lunge",
"71": "brushing hair",
"72": "curling hair",
"73": "kitesurfing",
"74": "tapping guitar",
"75": "bending back",
"76": "skipping rope",
"77": "situp",
"78": "folding paper",
"79": "cracking neck",
"80": "assembling computer",
"81": "cleaning gutters",
"82": "blowing out candles",
"83": "shaking hands",
"84": "dancing gangnam style",
"85": "windsurfing",
"86": "tap dancing",
"87": "skiing (not slalom or crosscountry)",
"88": "bandaging",
"89": "push up",
"90": "doing nails",
"91": "punching person (boxing)",
"92": "bouncing on trampoline",
"93": "scrambling eggs",
"94": "singing",
"95": "cleaning floor",
"96": "krumping",
"97": "drumming fingers",
"98": "snowmobiling",
"99": "gymnastics tumbling",
"100": "headbanging",
"101": "catching or throwing frisbee",
"102": "riding elephant",
"103": "bee keeping",
"104": "feeding birds",
"105": "snatch weight lifting",
"106": "mowing lawn",
"107": "fixing hair",
"108": "playing trumpet",
"109": "flying kite",
"110": "crossing river",
"111": "swinging legs",
"112": "sanding floor",
"113": "belly dancing",
"114": "sneezing",
"115": "clean and jerk",
"116": "side kick",
"117": "filling eyebrows",
"118": "shuffling cards",
"119": "recording music",
"120": "cartwheeling",
"121": "feeding fish",
"122": "folding clothes",
"123": "water skiing",
"124": "tobogganing",
"125": "blowing leaves",
"126": "smoking",
"127": "unboxing",
"128": "tai chi",
"129": "waxing legs",
"130": "riding camel",
"131": "slapping",
"132": "tossing salad",
"133": "capoeira",
"134": "playing cards",
"135": "playing organ",
"136": "playing violin",
"137": "playing drums",
"138": "tapping pen",
"139": "vault",
"140": "shoveling snow",
"141": "playing tennis",
"142": "getting a tattoo",
"143": "making a sandwich",
"144": "making tea",
"145": "grinding meat",
"146": "squat",
"147": "eating doughnuts",
"148": "ice fishing",
"149": "snowkiting",
"150": "kicking soccer ball",
"151": "playing controller",
"152": "giving or receiving award",
"153": "welding",
"154": "throwing discus",
"155": "throwing axe",
"156": "ripping paper",
"157": "swimming butterfly stroke",
"158": "air drumming",
"159": "blowing nose",
"160": "hockey stop",
"161": "taking a shower",
"162": "bench pressing",
"163": "planting trees",
"164": "pumping fist",
"165": "climbing tree",
"166": "tickling",
"167": "high kick",
"168": "waiting in line",
"169": "slacklining",
"170": "tango dancing",
"171": "hurdling",
"172": "carrying baby",
"173": "celebrating",
"174": "sharpening knives",
"175": "passing American football (in game)",
"176": "headbutting",
"177": "playing recorder",
"178": "brush painting",
"179": "garbage collecting",
"180": "robot dancing",
"181": "shredding paper",
"182": "pumping gas",
"183": "rock climbing",
"184": "hula hooping",
"185": "braiding hair",
"186": "opening present",
"187": "texting",
"188": "decorating the christmas tree",
"189": "answering questions",
"190": "playing keyboard",
"191": "writing",
"192": "bungee jumping",
"193": "sniffing",
"194": "eating burger",
"195": "playing accordion",
"196": "making pizza",
"197": "playing volleyball",
"198": "tasting food",
"199": "pushing cart",
"200": "spinning poi",
"201": "cleaning windows",
"202": "arm wrestling",
"203": "changing oil",
"204": "swimming breast stroke",
"205": "tossing coin",
"206": "deadlifting",
"207": "hoverboarding",
"208": "cutting watermelon",
"209": "cheerleading",
"210": "snorkeling",
"211": "washing hands",
"212": "eating cake",
"213": "pull ups",
"214": "surfing water",
"215": "eating hotdog",
"216": "holding snake",
"217": "playing harmonica",
"218": "ironing",
"219": "cutting nails",
"220": "golf chipping",
"221": "shot put",
"222": "hugging",
"223": "playing clarinet",
"224": "faceplanting",
"225": "trimming or shaving beard",
"226": "drinking shots",
"227": "riding mountain bike",
"228": "tying bow tie",
"229": "swinging on something",
"230": "skiing crosscountry",
"231": "unloading truck",
"232": "cleaning pool",
"233": "jogging",
"234": "ice climbing",
"235": "mopping floor",
"236": "making bed",
"237": "diving cliff",
"238": "washing dishes",
"239": "grooming dog",
"240": "weaving basket",
"241": "frying vegetables",
"242": "stomping grapes",
"243": "moving furniture",
"244": "cooking sausages",
"245": "doing laundry",
"246": "dying hair",
"247": "knitting",
"248": "reading book",
"249": "baby waking up",
"250": "punching bag",
"251": "surfing crowd",
"252": "cooking chicken",
"253": "pushing car",
"254": "springboard diving",
"255": "swing dancing",
"256": "massaging legs",
"257": "beatboxing",
"258": "breading or breadcrumbing",
"259": "somersaulting",
"260": "brushing teeth",
"261": "stretching arm",
"262": "juggling balls",
"263": "massaging person's head",
"264": "eating ice cream",
"265": "extinguishing fire",
"266": "hammer throw",
"267": "whistling",
"268": "crawling baby",
"269": "using remote controller (not gaming)",
"270": "playing cricket",
"271": "opening bottle",
"272": "playing xylophone",
"273": "motorcycling",
"274": "driving car",
"275": "exercising arm",
"276": "passing American football (not in game)",
"277": "playing kickball",
"278": "sticking tongue out",
"279": "flipping pancake",
"280": "catching fish",
"281": "eating chips",
"282": "shaking head",
"283": "sword fighting",
"284": "playing poker",
"285": "cooking on campfire",
"286": "doing aerobics",
"287": "paragliding",
"288": "using segway",
"289": "folding napkins",
"290": "playing bagpipes",
"291": "gargling",
"292": "skiing slalom",
"293": "strumming guitar",
"294": "javelin throw",
"295": "waxing back",
"296": "riding or walking with horse",
"297": "plastering",
"298": "long jump",
"299": "parkour",
"300": "wrapping present",
"301": "egg hunting",
"302": "archery",
"303": "cleaning toilet",
"304": "swimming backstroke",
"305": "snowboarding",
"306": "catching or throwing baseball",
"307": "massaging back",
"308": "blowing glass",
"309": "playing guitar",
"310": "playing chess",
"311": "golf driving",
"312": "presenting weather forecast",
"313": "rock scissors paper",
"314": "high jump",
"315": "baking cookies",
"316": "using computer",
"317": "washing feet",
"318": "arranging flowers",
"319": "playing bass guitar",
"320": "spraying",
"321": "cutting pineapple",
"322": "waxing chest",
"323": "auctioning",
"324": "jetskiing",
"325": "drinking",
"326": "busking",
"327": "playing monopoly",
"328": "salsa dancing",
"329": "waxing eyebrows",
"330": "watering plants",
"331": "zumba",
"332": "chopping wood",
"333": "pushing wheelchair",
"334": "carving pumpkin",
"335": "building shed",
"336": "making jewelry",
"337": "catching or throwing softball",
"338": "bending metal",
"339": "ice skating",
"340": "dancing charleston",
"341": "abseiling",
"342": "climbing a rope",
"343": "crying",
"344": "cleaning shoes",
"345": "dancing ballet",
"346": "driving tractor",
"347": "triple jump",
"348": "throwing ball",
"349": "getting a haircut",
"350": "running on treadmill",
"351": "climbing ladder",
"352": "blasting sand",
"353": "playing trombone",
"354": "drop kicking",
"355": "country line dancing",
"356": "changing wheel",
"357": "feeding goats",
"358": "tying knot (not on a tie)",
"359": "setting table",
"360": "shaving legs",
"361": "kissing",
"362": "riding mule",
"363": "counting money",
"364": "laying bricks",
"365": "barbequing",
"366": "news anchoring",
"367": "smoking hookah",
"368": "cooking egg",
"369": "peeling apples",
"370": "yoga",
"371": "sharpening pencil",
"372": "dribbling basketball",
"373": "petting cat",
"374": "playing ice hockey",
"375": "milking cow",
"376": "shining shoes",
"377": "juggling soccer ball",
"378": "scuba diving",
"379": "playing squash or racquetball",
"380": "drinking beer",
"381": "sign language interpreting",
"382": "playing basketball",
"383": "breakdancing",
"384": "testifying",
"385": "making snowman",
"386": "golf putting",
"387": "playing didgeridoo",
"388": "biking through snow",
"389": "sailing",
"390": "jumpstyle dancing",
"391": "water sliding",
"392": "grooming horse",
"393": "massaging feet",
"394": "playing paintball",
"395": "making a cake",
"396": "bowling",
"397": "contact juggling",
"398": "applying cream",
"399": "playing badminton"
}
def intern_action_b16(
pretrained=True, use_checkpoint=False, checkpoint_num=[0],
t_size=16, dw_reduction=1.5, backbone_drop_path_rate=0.,
temporal_downsample=True,
no_lmhra=False, double_lmhra=True,
return_list=[8, 9, 10, 11],
n_layers=4, n_dim=768, n_head=12, mlp_factor=4.0, drop_path_rate=0.,
mlp_dropout=[0.5, 0.5, 0.5, 0.5],
cls_dropout=0.5, num_classes=400,
):
model = VisionTransformer(
input_resolution=224,
patch_size=16,
width=768,
layers=12,
heads=12,
output_dim=512,
use_checkpoint=use_checkpoint,
checkpoint_num=checkpoint_num,
t_size=t_size,
dw_reduction=dw_reduction,
backbone_drop_path_rate=backbone_drop_path_rate,
temporal_downsample=temporal_downsample,
no_lmhra=no_lmhra,
double_lmhra=double_lmhra,
return_list=return_list,
n_layers=n_layers,
n_dim=n_dim,
n_head=n_head,
mlp_factor=mlp_factor,
drop_path_rate=drop_path_rate,
mlp_dropout=mlp_dropout,
cls_dropout=cls_dropout,
num_classes=num_classes,
)
if pretrained:
print('load pretrained weights')
state_dict = torch.load(_MODELS["ViT-B/16"], map_location='cpu')
load_state_dict(model, state_dict)
return model.eval()
def load_intern_action():
# Create an id to label name mapping
kinetics_id_to_classname = {}
for k, v in kinetics_classnames.items():
kinetics_id_to_classname[k] = v
model_path = hf_hub_download(repo_id="Andy1621/uniformerv2", filename="k400+k710_uniformerv2_b16_8x224.pyth")
# Pick a pretrained model
model = Intern_Action(intern_action_b16(pretrained=False, t_size=8, no_lmhra=True, temporal_downsample=False))
#state_dict = torch.load(model_path, map_location=device)
state_dict = torch.load(model_path, map_location="cpu")
model.load_state_dict(state_dict)
# Set to eval mode and move to desired device
# model = model.to(device)
model = model.eval()
return model | null |
15,600 | from .intern_action import intern_action_b16
from huggingface_hub import hf_hub_download
import torch
import torch.nn as nn
import torchvision.transforms as T
import torch.nn.functional as F
import numpy as np
from .processing import (
GroupNormalize, GroupScale, GroupCenterCrop,
Stack, ToTorchFormatTensor
)
def cut_frame_to_8(data):
index = np.linspace(0, len(data)-1, 8).astype(int)
return data[index] | null |
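A worked example (illustrative): np.linspace picks 8 evenly spaced positions over the index range, truncated to ints:
data = np.arange(32)
assert list(cut_frame_to_8(data)) == [0, 4, 8, 13, 17, 22, 26, 31]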
15,601 | import warnings
from .vit import VisionTransformer, interpolate_pos_embed
from .swin_transformer import SwinTransformer, interpolate_relative_pos_embed
from .med import BertConfig, BertModel, BertLMHeadModel
from .utils import tra_array
from transformers import BertTokenizer
import torch
from torch import nn
import torch.nn.functional as F
import os
from urllib.parse import urlparse
from timm.models.hub import download_cached_file
import json
import math
import numpy as np
class Tag2Text_Caption(nn.Module):
def __init__(self,
med_config = 'configs/med_config.json',
image_size = 384,
vit = 'base',
vit_grad_ckpt = False,
vit_ckpt_layer = 0,
prompt = 'a picture of ',
threshold = 0.68,
):
def del_selfattention(self):
def generate(self, image, sample=False, num_beams=3, max_length=30, min_length=10, top_p=0.9, repetition_penalty=1.0, tag_input = None, return_tag_predict = False):
from typing import List
def load_checkpoint(model,url_or_filename):
def load_checkpoint_swinbase(model,url_or_filename,kwargs):
def tag2text_caption(pretrained='',**kwargs):
model = Tag2Text_Caption(**kwargs)
if pretrained:
if kwargs['vit'] == 'swin_b':
model,msg = load_checkpoint_swinbase(model,pretrained,kwargs)
else:
model,msg = load_checkpoint(model,pretrained)
# print('vit:',kwargs['vit'])
# print('msg_v2',msg)
return model | null |
15,602 | import warnings
from .vit import VisionTransformer, interpolate_pos_embed
from .swin_transformer import SwinTransformer, interpolate_relative_pos_embed
from .med import BertConfig, BertModel, BertLMHeadModel
from .utils import tra_array
from transformers import BertTokenizer
import torch
from torch import nn
import torch.nn.functional as F
import os
from urllib.parse import urlparse
from timm.models.hub import download_cached_file
import json
import math
import numpy as np
from typing import List
import logging
logger = logging.getLogger(__name__)
def tie_encoder_decoder_weights(encoder: nn.Module, decoder: nn.Module, base_model_prefix: str, skip_key:str):
uninitialized_encoder_weights: List[str] = []
if decoder.__class__ != encoder.__class__:
logger.info(
f"{decoder.__class__} and {encoder.__class__} are not equal. In this case make sure that all encoder weights are correctly initialized."
)
def tie_encoder_to_decoder_recursively(
decoder_pointer: nn.Module,
encoder_pointer: nn.Module,
module_name: str,
uninitialized_encoder_weights: List[str],
skip_key: str,
depth=0,
):
assert isinstance(decoder_pointer, nn.Module) and isinstance(
encoder_pointer, nn.Module
), f"{decoder_pointer} and {encoder_pointer} have to be of type torch.nn.Module"
if hasattr(decoder_pointer, "weight") and skip_key not in module_name:
assert hasattr(encoder_pointer, "weight")
encoder_pointer.weight = decoder_pointer.weight
if hasattr(decoder_pointer, "bias"):
assert hasattr(encoder_pointer, "bias")
encoder_pointer.bias = decoder_pointer.bias
# print(module_name+' is tied')
return
encoder_modules = encoder_pointer._modules
decoder_modules = decoder_pointer._modules
if len(decoder_modules) > 0:
assert (
len(encoder_modules) > 0
), f"Encoder module {encoder_pointer} does not match decoder module {decoder_pointer}"
all_encoder_weights = set([module_name + "/" + sub_name for sub_name in encoder_modules.keys()])
encoder_layer_pos = 0
for name, module in decoder_modules.items():
if name.isdigit():
encoder_name = str(int(name) + encoder_layer_pos)
decoder_name = name
if not isinstance(decoder_modules[decoder_name], type(encoder_modules[encoder_name])) and len(
encoder_modules
) != len(decoder_modules):
# this can happen if the name corresponds to the position in a list module list of layers
# in this case the decoder has added a cross-attention that the encoder does not have
# thus skip this step and subtract one layer pos from encoder
encoder_layer_pos -= 1
continue
elif name not in encoder_modules:
continue
elif depth > 500:
raise ValueError(
"Max depth of recursive function `tie_encoder_to_decoder` reached. It seems that there is a circular dependency between two or more `nn.Modules` of your model."
)
else:
decoder_name = encoder_name = name
tie_encoder_to_decoder_recursively(
decoder_modules[decoder_name],
encoder_modules[encoder_name],
module_name + "/" + name,
uninitialized_encoder_weights,
skip_key,
depth=depth + 1,
)
all_encoder_weights.remove(module_name + "/" + encoder_name)
uninitialized_encoder_weights += list(all_encoder_weights)
# tie weights recursively
tie_encoder_to_decoder_recursively(decoder, encoder, base_model_prefix, uninitialized_encoder_weights, skip_key) | null |
15,603 | import warnings
from .vit import VisionTransformer, interpolate_pos_embed
from .swin_transformer import SwinTransformer, interpolate_relative_pos_embed
from .med import BertConfig, BertModel, BertLMHeadModel
from .utils import tra_array
from transformers import BertTokenizer
import torch
from torch import nn
import torch.nn.functional as F
import os
from urllib.parse import urlparse
from timm.models.hub import download_cached_file
import json
import math
import numpy as np
from typing import List
def init_tokenizer():
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
tokenizer.add_special_tokens({'bos_token':'[DEC]'})
tokenizer.add_special_tokens({'additional_special_tokens':['[ENC]']})
tokenizer.enc_token_id = tokenizer.additional_special_tokens_ids[0]
return tokenizer | null |
15,604 | import warnings
from .vit import VisionTransformer, interpolate_pos_embed
from .swin_transformer import SwinTransformer, interpolate_relative_pos_embed
from .med import BertConfig, BertModel, BertLMHeadModel
from .utils import tra_array
from transformers import BertTokenizer
import torch
from torch import nn
import torch.nn.functional as F
import os
from urllib.parse import urlparse
from timm.models.hub import download_cached_file
import json
import math
import numpy as np
from typing import List
from functools import partial
from timm.models.layers import trunc_normal_, PatchEmbed
class VisionTransformer(nn.Module):
""" Vision Transformer
A PyTorch impl of : `An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale` -
https://arxiv.org/abs/2010.11929
"""
def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, depth=12,
num_heads=12, mlp_ratio=4., qkv_bias=True, qk_scale=None, representation_size=None,
drop_rate=0., attn_drop_rate=0., drop_path_rate=0., norm_layer=None,
use_grad_checkpointing=False, ckpt_layer=0):
"""
Args:
img_size (int, tuple): input image size
patch_size (int, tuple): patch size
in_chans (int): number of input channels
num_classes (int): number of classes for classification head
embed_dim (int): embedding dimension
depth (int): depth of transformer
num_heads (int): number of attention heads
mlp_ratio (int): ratio of mlp hidden dim to embedding dim
qkv_bias (bool): enable bias for qkv if True
qk_scale (float): override default qk scale of head_dim ** -0.5 if set
representation_size (Optional[int]): enable and set representation layer (pre-logits) to this value if set
drop_rate (float): dropout rate
attn_drop_rate (float): attention dropout rate
drop_path_rate (float): stochastic depth rate
norm_layer: (nn.Module): normalization layer
"""
super().__init__()
self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models
norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6)
self.patch_embed = PatchEmbed(
img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim)
num_patches = self.patch_embed.num_patches
self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim))
self.pos_drop = nn.Dropout(p=drop_rate)
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule
self.blocks = nn.ModuleList([
Block(
dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer,
use_grad_checkpointing=(use_grad_checkpointing and i>=depth-ckpt_layer)
)
for i in range(depth)])
self.norm = norm_layer(embed_dim)
trunc_normal_(self.pos_embed, std=.02)
trunc_normal_(self.cls_token, std=.02)
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
def no_weight_decay(self):
return {'pos_embed', 'cls_token'}
def forward(self, x, register_blk=-1):
B = x.shape[0]
x = self.patch_embed(x)
cls_tokens = self.cls_token.expand(B, -1, -1) # stole cls_tokens impl from Phil Wang, thanks
x = torch.cat((cls_tokens, x), dim=1)
x = x + self.pos_embed[:,:x.size(1),:]
x = self.pos_drop(x)
for i,blk in enumerate(self.blocks):
x = blk(x, register_blk==i)
x = self.norm(x)
return x
def load_pretrained(self, checkpoint_path, prefix=''):
_load_weights(self, checkpoint_path, prefix)
def create_vit(vit, image_size, use_grad_checkpointing=False, ckpt_layer=0, drop_path_rate=0):
assert vit in ['base', 'large'], "vit parameter must be base or large"
if vit=='base':
vision_width = 768
visual_encoder = VisionTransformer(img_size=image_size, patch_size=16, embed_dim=vision_width, depth=12,
num_heads=12, use_grad_checkpointing=use_grad_checkpointing, ckpt_layer=ckpt_layer,
drop_path_rate=0 or drop_path_rate
)
elif vit=='large':
vision_width = 1024
visual_encoder = VisionTransformer(img_size=image_size, patch_size=16, embed_dim=vision_width, depth=24,
num_heads=16, use_grad_checkpointing=use_grad_checkpointing, ckpt_layer=ckpt_layer,
drop_path_rate=0.1 or drop_path_rate
)
return visual_encoder, vision_width | null |
15,605 | import dataclasses
from enum import auto, Enum
from typing import List, Tuple, Any
conv_one_shot = Conversation(
system="A chat between a curious human and an artificial intelligence assistant. "
"The assistant gives helpful, detailed, and polite answers to the human's questions.",
roles=("Human", "Assistant"),
messages=(
(
"Human",
"What are the key differences between renewable and non-renewable energy sources?",
),
(
"Assistant",
"Renewable energy sources are those that can be replenished naturally in a relatively "
"short amount of time, such as solar, wind, hydro, geothermal, and biomass. "
"Non-renewable energy sources, on the other hand, are finite and will eventually be "
"depleted, such as coal, oil, and natural gas. Here are some key differences between "
"renewable and non-renewable energy sources:\n"
"1. Availability: Renewable energy sources are virtually inexhaustible, while non-renewable "
"energy sources are finite and will eventually run out.\n"
"2. Environmental impact: Renewable energy sources have a much lower environmental impact "
"than non-renewable sources, which can lead to air and water pollution, greenhouse gas emissions, "
"and other negative effects.\n"
"3. Cost: Renewable energy sources can be more expensive to initially set up, but they typically "
"have lower operational costs than non-renewable sources.\n"
"4. Reliability: Renewable energy sources are often more reliable and can be used in more remote "
"locations than non-renewable sources.\n"
"5. Flexibility: Renewable energy sources are often more flexible and can be adapted to different "
"situations and needs, while non-renewable sources are more rigid and inflexible.\n"
"6. Sustainability: Renewable energy sources are more sustainable over the long term, while "
"non-renewable sources are not, and their depletion can lead to economic and social instability.",
),
),
offset=2,
sep_style=SeparatorStyle.SINGLE,
sep="###",
)
conv_husky = Conversation(
system="",
roles=("Human", "Assistant"),
messages=(),
offset=0,
sep_style=SeparatorStyle.TWO,
sep=" ",
sep2="</s>",
)
conv_multi_model = Conversation(
system="",
roles=("Human", "Assistant"),
messages=(),
offset=0,
sep_style=SeparatorStyle.TWO,
sep=" ",
sep2="</s>",
)
def get_default_conv_template(model_name):
model_name = model_name.lower()
if "husky" in model_name or "llama" in model_name:
return conv_husky
elif "multi_model" in model_name or "vision" in model_name:
return conv_multi_model
return conv_one_shot | null |
15,606 | import dataclasses
from enum import auto, Enum
from typing import List, Tuple, Any
def compute_skip_echo_len(model_name, conv, prompt):
model_name = model_name.lower()
if "husky" in model_name:
skip_echo_len = len(prompt) - prompt.count("</s>") * 3
else:
skip_echo_len = len(prompt) + 1 - prompt.count("</s>") * 3
return skip_echo_len | null |
15,607 | import dataclasses
import torch
from torch import Tensor
import torch.nn as nn
from torch.nn import functional as F
class CLinear(nn.Module):
"""Compressed Linear Layer."""
def __init__(self, weight, bias, device):
super().__init__()
self.weight = compress(weight.data.to(device), default_compression_config)
self.bias = bias
def forward(self, input: Tensor) -> Tensor:
weight = decompress(self.weight, default_compression_config)
return F.linear(input, weight, self.bias)
def compress_module(module, target_device):
for attr_str in dir(module):
target_attr = getattr(module, attr_str)
if type(target_attr) == torch.nn.Linear:
setattr(
module,
attr_str,
CLinear(target_attr.weight, target_attr.bias, target_device),
)
for name, child in module.named_children():
compress_module(child, target_device) | null |
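`compress`, `decompress`, and `default_compression_config` are referenced above but defined elsewhere (the `compress`/`decompress` entries appear below); a minimal sketch of the config object, following FastChat's group-wise quantization defaults (the concrete numbers are that project's defaults, not guaranteed by this snippet):
@dataclasses.dataclass
class CompressionConfig:
    """Group-wise quantization settings read by compress()/decompress()."""
    num_bits: int
    group_size: int
    group_dim: int
    symmetric: bool
    enabled: bool = True

default_compression_config = CompressionConfig(
    num_bits=8, group_size=256, group_dim=1, symmetric=True, enabled=True
)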
15,608 | import dataclasses
import torch
from torch import Tensor
import torch.nn as nn
from torch.nn import functional as F
class CLinear_V2(nn.Module):
"""Compressed Linear Layer."""
def __init__(self, weight, bias, device):
super().__init__()
self.weight = weight.data.to("cpu")
self.weight_int8 = self.weight
self.bias = bias
self.device = device
def compress_weight(self):
self.weight_int8 = compress(self.weight.data.to(self.device), default_compression_config)
def forward(self, input: Tensor) -> Tensor:
weight = decompress(self.weight_int8, default_compression_config)
return F.linear(input, weight, self.bias)
def decompress_weight(self):
self.weight_int8 = self.weight
def replace_linear(module, target_device):
for attr_str in dir(module):
target_attr = getattr(module, attr_str)
if type(target_attr) == torch.nn.Linear:
setattr(
module,
attr_str,
CLinear_V2(target_attr.weight, target_attr.bias, target_device),
)
for name, child in module.named_children():
replace_linear(child, target_device) | null |
15,609 | import dataclasses
import torch
from torch import Tensor
import torch.nn as nn
from torch.nn import functional as F
class CLinear_V2(nn.Module):
"""Compressed Linear Layer."""
def __init__(self, weight, bias, device):
super().__init__()
self.weight = weight.data.to("cpu")
self.weight_int8 = self.weight
self.bias = bias
self.device = device
def compress_weight(self):
self.weight_int8 = compress(self.weight.data.to(self.device), default_compression_config)
def forward(self, input: Tensor) -> Tensor:
weight = decompress(self.weight_int8, default_compression_config)
return F.linear(input, weight, self.bias)
def decompress_weight(self):
self.weight_int8 = self.weight
def compress_module_V2(module):
for attr_str in dir(module):
target_attr = getattr(module, attr_str)
if type(target_attr) == CLinear_V2:
target_attr.compress_weight()
for name, child in module.named_children():
compress_module_V2(child) | null |
15,610 | import dataclasses
import torch
from torch import Tensor
import torch.nn as nn
from torch.nn import functional as F
class CLinear_V2(nn.Module):
"""Compressed Linear Layer."""
def __init__(self, weight, bias, device):
super().__init__()
self.weight = weight.data.to("cpu")
self.weight_int8 = self.weight
self.bias = bias
self.device = device
def compress_weight(self):
self.weight_int8 = compress(self.weight.data.to(self.device), default_compression_config)
def forward(self, input: Tensor) -> Tensor:
weight = decompress(self.weight_int8, default_compression_config)
return F.linear(input, weight, self.bias)
def decompress_weight(self):
self.weight_int8 = self.weight
def decompress_module_V2(module):
for attr_str in dir(module):
target_attr = getattr(module, attr_str)
if type(target_attr) == CLinear_V2:
target_attr.decompress_weight()
for name, child in module.named_children():
decompress_module_V2(child) | null |
15,611 | import dataclasses
import torch
from torch import Tensor
import torch.nn as nn
from torch.nn import functional as F
The provided code snippet includes necessary dependencies for implementing the `compress` function. Write a Python function `def compress(tensor, config)` to solve the following problem:
Simulate group-wise quantization.
Here is the function:
def compress(tensor, config):
"""Simulate group-wise quantization."""
if not config.enabled:
return tensor
group_size, num_bits, group_dim, symmetric = (
config.group_size,
config.num_bits,
config.group_dim,
config.symmetric,
)
assert num_bits <= 8
original_shape = tensor.shape
num_groups = (original_shape[group_dim] + group_size - 1) // group_size
new_shape = (
original_shape[:group_dim]
+ (num_groups, group_size)
+ original_shape[group_dim + 1 :]
)
# Pad
pad_len = (group_size - original_shape[group_dim] % group_size) % group_size
if pad_len != 0:
pad_shape = (
original_shape[:group_dim] + (pad_len,) + original_shape[group_dim + 1 :]
)
tensor = torch.cat(
[tensor, torch.zeros(pad_shape, dtype=tensor.dtype, device=tensor.device)],
dim=group_dim,
)
data = tensor.view(new_shape)
# Quantize
if symmetric:
B = 2 ** (num_bits - 1) - 1
scale = B / torch.max(data.abs(), dim=group_dim + 1, keepdim=True)[0]
data = data * scale
data = data.clamp_(-B, B).round_().to(torch.int8)
return data, scale, original_shape
else:
B = 2**num_bits - 1
mn = torch.min(data, dim=group_dim + 1, keepdim=True)[0]
mx = torch.max(data, dim=group_dim + 1, keepdim=True)[0]
scale = B / (mx - mn)
data = data - mn
data.mul_(scale)
data = data.clamp_(0, B).round_().to(torch.uint8)
return data, mn, scale, original_shape | Simulate group-wise quantization. |
15,612 | import dataclasses
import torch
from torch import Tensor
import torch.nn as nn
from torch.nn import functional as F
The provided code snippet includes necessary dependencies for implementing the `decompress` function. Write a Python function `def decompress(packed_data, config)` to solve the following problem:
Simulate group-wise dequantization.
Here is the function:
def decompress(packed_data, config):
"""Simulate group-wise dequantization."""
if not config.enabled:
return packed_data
group_size, num_bits, group_dim, symmetric = (
config.group_size,
config.num_bits,
config.group_dim,
config.symmetric,
)
# Dequantize
if symmetric:
data, scale, original_shape = packed_data
data = data / scale
else:
data, mn, scale, original_shape = packed_data
data = data / scale
data.add_(mn)
# Unpad
pad_len = (group_size - original_shape[group_dim] % group_size) % group_size
if pad_len:
padded_original_shape = (
original_shape[:group_dim]
+ (original_shape[group_dim] + pad_len,)
+ original_shape[group_dim + 1 :]
)
data = data.reshape(padded_original_shape)
indices = [slice(0, x) for x in original_shape]
return data[indices].contiguous()
else:
return data.view(original_shape) | Simulate group-wise dequantization. |
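A small round-trip check for `compress`/`decompress`; the `CompressionConfig` construction is an assumption (the snippet itself only reads `enabled`, `group_size`, `num_bits`, `group_dim`, and `symmetric` off `config`):
# Hypothetical round-trip: int8-quantize a weight matrix, then dequantize it.
cfg = CompressionConfig(num_bits=8, group_size=256, group_dim=1, symmetric=True, enabled=True)
w = torch.randn(1024, 4096)
packed = compress(w, cfg)        # (int8 data, scale, original_shape) in the symmetric case
w_hat = decompress(packed, cfg)  # float tensor, shape (1024, 4096)
print((w - w_hat).abs().max())   # small quantization error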
15,613 | import torch
import numpy as np
from decord import VideoReader
from decord import cpu
import uuid
import os
import torchvision.transforms as transforms
import math
import time
import cv2
import random
def add_points_to_image(image, points, size=5):
    # points are stored as (row, col); cv2.circle expects (col, row), hence the (y, x) swap below
    for x, y in points['end']:
        cv2.circle(image, (y, x), size, (255, 0, 0), -1)
    for x, y in points['start']:
        cv2.circle(image, (y, x), size, (0, 0, 255), -1)
return image | null |
15,614 | import torch
import numpy as np
from decord import VideoReader
from decord import cpu
import uuid
import os
import torchvision.transforms as transforms
import math
import time
import cv2
import random
def to_image(tensor):
    # (1, C, H, W) tensor -> (H, W, C) uint8 array, min-max normalized to [0, 255]
    tensor = tensor.squeeze(0).permute(1, 2, 0).contiguous()
    arr = tensor.detach().cpu().numpy()
    arr = (arr - arr.min()) / (arr.max() - arr.min() + 1e-8)  # epsilon guards against a constant input
    arr = arr * 255
    return arr.astype(np.uint8) | null
15,615 | import torch
import numpy as np
from decord import VideoReader
from decord import cpu
import uuid
import os
import torchvision.transforms as transforms
import math
import time
import cv2
import random
def gen_new_seed():
return random.randint(0, 65535)
def seed_everything(seed):
if seed == -1:
seed = gen_new_seed()
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
return seed | null |
15,616 | import torch
import numpy as np
from decord import VideoReader
from decord import cpu
import uuid
import os
import torchvision.transforms as transforms
import math
import time
import cv2
import random
def resize_800(image):
    # resize a PIL image so its longer side becomes 800 px, preserving the aspect ratio
w, h = image.size
if w > h:
ratio = w * 1.0 / 800
new_w, new_h = 800, int(h * 1.0 / ratio)
else:
ratio = h * 1.0 / 800
new_w, new_h = int(w * 1.0 / ratio), 800
image = image.resize((new_w, new_h))
return image | null |
15,617 | import torch
import numpy as np
from decord import VideoReader
from decord import cpu
import uuid
import os
import torchvision.transforms as transforms
import math
import time
import cv2
import random
def prompts(name, description):
def decorator(func):
func.name = name
func.description = description
return func
return decorator | null |
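The decorator only attaches metadata that tool-registration code can read back; a hypothetical usage (the tool name and description below are illustrative):
@prompts(name="Segment Anything On Image",
         description="useful when you want to segment an object in an image; "
                     "the input is the image path")
def inference(self, image_path):
    ...  # inference.name and inference.description are now set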
15,618 | import torch
import numpy as np
from decord import VideoReader
from decord import cpu
import uuid
import os
import torchvision.transforms as transforms
import math
import time
import cv2
import random
def gen_new_name(original_name, suffix="update", ext="png"):
    root_path, filename = os.path.split(original_name)
    name_split = os.path.splitext(filename)[0].split('_')
    this_new_uuid = str(uuid.uuid4())[:3]
    timestamp = int(math.modf(time.time())[0] * 1000)
    prev_file_name = name_split[0]
    if len(suffix.strip()) == 0:
        new_file_name = f'{this_new_uuid}{timestamp:03d}_{prev_file_name}.{ext}'
    else:
        new_file_name = f'{this_new_uuid}{timestamp:03d}_{prev_file_name}_{suffix}.{ext}'
    return os.path.join(root_path, new_file_name) | null
15,619 | import torch
import numpy as np
from decord import VideoReader
from decord import cpu
import uuid
import os
import torchvision.transforms as transforms
import math
import time
import cv2
import random
def dilate_mask(mask, dilate_factor=9):
# dilate mask
mask = mask.astype(np.uint8)
dilated_mask = cv2.dilate(mask, np.ones((dilate_factor, dilate_factor), np.uint8), iterations=1)
return dilated_mask | null |
15,620 | import torch
import numpy as np
from decord import VideoReader
from decord import cpu
import uuid
import os
import torchvision.transforms as transforms
import math
import time
import cv2
import random
def cal_dilate_factor(mask):
    # heuristic: pick a dilation kernel size proportional to the mask's area / perimeter
    area = mask[mask != 0].sum()
    edge = cv2.Canny(mask, 30, 226)
    perimeter = edge.sum()
    ratio = 0
    if perimeter > 0:
        ratio = int(area * 0.55 / perimeter)
    if ratio % 2 == 0:
        ratio += 1  # cv2 dilation kernels should be odd-sized
    return ratio | null
15,621 | import torch
import numpy as np
from decord import VideoReader
from decord import cpu
import uuid
import os
import torchvision.transforms as transforms
import math
import time
import cv2
import random
from PIL import Image  # required by Image.fromarray below; missing from the original imports
def blend_gt2pt(old_image, new_image, sigma=0.15, steps=100):
    # paste old_image into the center of new_image, feathering the seam with a Gaussian border mask
new_size = new_image.size
old_size = old_image.size
easy_img = np.array(new_image)
gt_img_array = np.array(old_image)
pos_w = (new_size[0] - old_size[0]) // 2
pos_h = (new_size[1] - old_size[1]) // 2
kernel_h = cv2.getGaussianKernel(old_size[1], old_size[1] * sigma)
kernel_w = cv2.getGaussianKernel(old_size[0], old_size[0] * sigma)
kernel = np.multiply(kernel_h, np.transpose(kernel_w))
kernel[steps:-steps, steps:-steps] = 1
kernel[:steps, :steps] = kernel[:steps, :steps] / kernel[steps - 1, steps - 1]
kernel[:steps, -steps:] = kernel[:steps, -steps:] / kernel[steps - 1, -(steps)]
kernel[-steps:, :steps] = kernel[-steps:, :steps] / kernel[-steps, steps - 1]
kernel[-steps:, -steps:] = kernel[-steps:, -steps:] / kernel[-steps, -steps]
kernel = np.expand_dims(kernel, 2)
kernel = np.repeat(kernel, 3, 2)
weight = np.linspace(0, 1, steps)
top = np.expand_dims(weight, 1)
top = np.repeat(top, old_size[0] - 2 * steps, 1)
top = np.expand_dims(top, 2)
top = np.repeat(top, 3, 2)
weight = np.linspace(1, 0, steps)
down = np.expand_dims(weight, 1)
down = np.repeat(down, old_size[0] - 2 * steps, 1)
down = np.expand_dims(down, 2)
down = np.repeat(down, 3, 2)
weight = np.linspace(0, 1, steps)
left = np.expand_dims(weight, 0)
left = np.repeat(left, old_size[1] - 2 * steps, 0)
left = np.expand_dims(left, 2)
left = np.repeat(left, 3, 2)
weight = np.linspace(1, 0, steps)
right = np.expand_dims(weight, 0)
right = np.repeat(right, old_size[1] - 2 * steps, 0)
right = np.expand_dims(right, 2)
right = np.repeat(right, 3, 2)
kernel[:steps, steps:-steps] = top
kernel[-steps:, steps:-steps] = down
kernel[steps:-steps, :steps] = left
kernel[steps:-steps, -steps:] = right
pt_gt_img = easy_img[pos_h:pos_h + old_size[1], pos_w:pos_w + old_size[0]]
gaussian_gt_img = kernel * gt_img_array + (1 - kernel) * pt_gt_img # gt img with blur img
gaussian_gt_img = gaussian_gt_img.astype(np.int64)
easy_img[pos_h:pos_h + old_size[1], pos_w:pos_w + old_size[0]] = gaussian_gt_img
gaussian_img = Image.fromarray(easy_img)
return gaussian_img | null |
15,622 | import torch
import numpy as np
from decord import VideoReader
from decord import cpu
import uuid
import os
import torchvision.transforms as transforms
import math
import time
import cv2
import random
def loadvideo_decord(sample, sample_rate_scale=1,new_width=384, new_height=384, clip_len=8, frame_sample_rate=2,num_segment=1):
fname = sample
vr = VideoReader(fname, width=new_width, height=new_height,
num_threads=1, ctx=cpu(0))
# handle temporal segments
converted_len = int(clip_len * frame_sample_rate)
seg_len = len(vr) //num_segment
duration = max(len(vr) // vr.get_avg_fps(),8)
all_index = []
for i in range(num_segment):
index = np.linspace(0, seg_len, num=int(duration))
index = np.clip(index, 0, seg_len - 1).astype(np.int64)
index = index + i*seg_len
all_index.extend(list(index))
all_index = all_index[::int(sample_rate_scale)]
vr.seek(0)
buffer = vr.get_batch(all_index).asnumpy()
return buffer | null |
15,623 | import torch
import numpy as np
from decord import VideoReader
from decord import cpu
import uuid
import os
import torchvision.transforms as transforms
import math
import time
import cv2
import random
def loadvideo_decord_origin(sample, sample_rate_scale=1, new_width=384, new_height=384, clip_len=8, frame_sample_rate=2, num_segment=1):  # stray `self` removed: the function is module-level in this snippet
fname = sample
vr = VideoReader(fname,
num_threads=1, ctx=cpu(0))
# handle temporal segments
converted_len = int(clip_len * frame_sample_rate)
seg_len = len(vr) //num_segment
duration = max(len(vr) // vr.get_avg_fps(),8)
all_index = []
for i in range(num_segment):
index = np.linspace(0, seg_len, num=int(duration))
index = np.clip(index, 0, seg_len - 1).astype(np.int64)
index = index + i*seg_len
all_index.extend(list(index))
all_index = all_index[::int(sample_rate_scale)]
vr.seek(0)
buffer = vr.get_batch(all_index).asnumpy()
return buffer | null |
15,624 | import logging
import math
import os
import torch
import torch.nn as nn
import torchaudio
from PIL import Image
from pytorchvideo import transforms as pv_transforms
from pytorchvideo.data.clip_sampling import ConstantClipsPerVideoSampler
from pytorchvideo.data.encoded_video import EncodedVideo
from torchvision import transforms
from torchvision.transforms._transforms_video import NormalizeVideo
from .models.multimodal_preprocessors import SimpleTokenizer
def load_and_transform_vision_data(image_paths, device):
    if image_paths is None:
        return None
    # build the transform once instead of once per image
    data_transform = transforms.Compose(
        [
            transforms.Resize(
                224, interpolation=transforms.InterpolationMode.BICUBIC
            ),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(
                mean=(0.48145466, 0.4578275, 0.40821073),
                std=(0.26862954, 0.26130258, 0.27577711),
            ),
        ]
    )
    image_outputs = []
    for image_path in image_paths:
        with open(image_path, "rb") as fopen:
            image = Image.open(fopen).convert("RGB")
        image = data_transform(image).to(device)
        image_outputs.append(image)
    return torch.stack(image_outputs, dim=0) | null
15,625 | import logging
import math
import os
import torch
import torch.nn as nn
import torchaudio
from PIL import Image
from pytorchvideo import transforms as pv_transforms
from pytorchvideo.data.clip_sampling import ConstantClipsPerVideoSampler
from pytorchvideo.data.encoded_video import EncodedVideo
from torchvision import transforms
from torchvision.transforms._transforms_video import NormalizeVideo
from .models.multimodal_preprocessors import SimpleTokenizer
def load_and_transform_depth_data(depth_paths, device):
    if depth_paths is None:
        return None
    # build the transform once instead of once per image
    data_transform = transforms.Compose(
        [
            transforms.Resize(
                224, interpolation=transforms.InterpolationMode.BICUBIC
            ),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            # transforms.Normalize((0.5, ), (0.5, ))  # if I use this normalization, I cannot get good results...
        ]
    )
    depth_outputs = []
    for depth_path in depth_paths:
        with open(depth_path, "rb") as fopen:
            image = Image.open(fopen).convert("L")
        image = data_transform(image).to(device)
        depth_outputs.append(image)
    return torch.stack(depth_outputs, dim=0) | null
15,626 | import logging
import math
import os
import torch
import torch.nn as nn
import torchaudio
from PIL import Image
from pytorchvideo import transforms as pv_transforms
from pytorchvideo.data.clip_sampling import ConstantClipsPerVideoSampler
from pytorchvideo.data.encoded_video import EncodedVideo
from torchvision import transforms
from torchvision.transforms._transforms_video import NormalizeVideo
from .models.multimodal_preprocessors import SimpleTokenizer
def load_and_transform_thermal_data(thermal_paths, device):
    if thermal_paths is None:
        return None
    # build the transform once instead of once per image
    data_transform = transforms.Compose(
        [
            transforms.Resize(
                224, interpolation=transforms.InterpolationMode.BICUBIC
            ),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize((0.5, ), (0.5, )),
        ]
    )
    thermal_outputs = []
    for thermal_path in thermal_paths:
        with open(thermal_path, "rb") as fopen:
            image = Image.open(fopen).convert("L")
        image = data_transform(image).to(device)
        thermal_outputs.append(image)
    return torch.stack(thermal_outputs, dim=0) | null
15,627 | import logging
import math
import os
import torch
import torch.nn as nn
import torchaudio
from PIL import Image
from pytorchvideo import transforms as pv_transforms
from pytorchvideo.data.clip_sampling import ConstantClipsPerVideoSampler
from pytorchvideo.data.encoded_video import EncodedVideo
from torchvision import transforms
from torchvision.transforms._transforms_video import NormalizeVideo
from .models.multimodal_preprocessors import SimpleTokenizer
import gzip
import io
import regex as re
from iopath.common.file_io import g_pathmgr
# the four imports above are used by the inline SimpleTokenizer but were missing from this snippet
CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))  # assumed: CURRENT_DIR is not defined in this snippet
BPE_PATH = os.path.join(CURRENT_DIR, "bpe/bpe_simple_vocab_16e6.txt.gz")
class SimpleTokenizer(object):
def __init__(self, bpe_path: str, context_length=77):
self.byte_encoder = bytes_to_unicode()
self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
with g_pathmgr.open(bpe_path, "rb") as fh:
bpe_bytes = io.BytesIO(fh.read())
merges = gzip.open(bpe_bytes).read().decode("utf-8").split("\n")
merges = merges[1 : 49152 - 256 - 2 + 1]
merges = [tuple(merge.split()) for merge in merges]
vocab = list(bytes_to_unicode().values())
vocab = vocab + [v + "</w>" for v in vocab]
for merge in merges:
vocab.append("".join(merge))
vocab.extend(["<|startoftext|>", "<|endoftext|>"])
self.encoder = dict(zip(vocab, range(len(vocab))))
self.decoder = {v: k for k, v in self.encoder.items()}
self.bpe_ranks = dict(zip(merges, range(len(merges))))
self.cache = {
"<|startoftext|>": "<|startoftext|>",
"<|endoftext|>": "<|endoftext|>",
}
self.pat = re.compile(
r"""<\|startoftext\|>|<\|endoftext\|>|'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+""",
re.IGNORECASE,
)
self.context_length = context_length
def bpe(self, token):
if token in self.cache:
return self.cache[token]
word = tuple(token[:-1]) + (token[-1] + "</w>",)
pairs = get_pairs(word)
if not pairs:
return token + "</w>"
while True:
bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
if bigram not in self.bpe_ranks:
break
first, second = bigram
new_word = []
i = 0
while i < len(word):
try:
j = word.index(first, i)
new_word.extend(word[i:j])
i = j
                except ValueError:  # `first` does not occur in the rest of the word
new_word.extend(word[i:])
break
if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
new_word = tuple(new_word)
word = new_word
if len(word) == 1:
break
else:
pairs = get_pairs(word)
word = " ".join(word)
self.cache[token] = word
return word
def encode(self, text):
bpe_tokens = []
text = whitespace_clean(basic_clean(text)).lower()
for token in re.findall(self.pat, text):
token = "".join(self.byte_encoder[b] for b in token.encode("utf-8"))
bpe_tokens.extend(
self.encoder[bpe_token] for bpe_token in self.bpe(token).split(" ")
)
return bpe_tokens
def decode(self, tokens):
text = "".join([self.decoder[token] for token in tokens])
text = (
bytearray([self.byte_decoder[c] for c in text])
.decode("utf-8", errors="replace")
.replace("</w>", " ")
)
return text
def __call__(self, texts, context_length=None):
if not context_length:
context_length = self.context_length
if isinstance(texts, str):
texts = [texts]
sot_token = self.encoder["<|startoftext|>"]
eot_token = self.encoder["<|endoftext|>"]
all_tokens = [[sot_token] + self.encode(text) + [eot_token] for text in texts]
result = torch.zeros(len(all_tokens), context_length, dtype=torch.long)
for i, tokens in enumerate(all_tokens):
tokens = tokens[:context_length]
result[i, : len(tokens)] = torch.tensor(tokens)
if len(result) == 1:
return result[0]
return result
def load_and_transform_text(text, device):
if text is None:
return None
tokenizer = SimpleTokenizer(bpe_path=BPE_PATH)
tokens = [tokenizer(t).unsqueeze(0).to(device) for t in text]
tokens = torch.cat(tokens, dim=0)
return tokens | null |
15,628 | import logging
import math
import os
import torch
import torch.nn as nn
import torchaudio
from PIL import Image
from pytorchvideo import transforms as pv_transforms
from pytorchvideo.data.clip_sampling import ConstantClipsPerVideoSampler
from pytorchvideo.data.encoded_video import EncodedVideo
from torchvision import transforms
from torchvision.transforms._transforms_video import NormalizeVideo
from .models.multimodal_preprocessors import SimpleTokenizer
DEFAULT_AUDIO_FRAME_SHIFT_MS = 10  # assumed value (10 ms, as in ImageBind); referenced below but not defined in this snippet
def waveform2melspec(waveform, sample_rate, num_mel_bins, target_length):
# Based on https://github.com/YuanGongND/ast/blob/d7d8b4b8e06cdaeb6c843cdb38794c1c7692234c/src/dataloader.py#L102
waveform -= waveform.mean()
fbank = torchaudio.compliance.kaldi.fbank(
waveform,
htk_compat=True,
sample_frequency=sample_rate,
use_energy=False,
window_type="hanning",
num_mel_bins=num_mel_bins,
dither=0.0,
frame_length=25,
frame_shift=DEFAULT_AUDIO_FRAME_SHIFT_MS,
)
# Convert to [mel_bins, num_frames] shape
fbank = fbank.transpose(0, 1)
# Pad to target_length
n_frames = fbank.size(1)
p = target_length - n_frames
# if p is too large (say >20%), flash a warning
if abs(p) / n_frames > 0.2:
logging.warning(
"Large gap between audio n_frames(%d) and "
"target_length (%d). Is the audio_target_length "
"setting correct?",
n_frames,
target_length,
)
# cut and pad
if p > 0:
fbank = torch.nn.functional.pad(fbank, (0, p), mode="constant", value=0)
elif p < 0:
fbank = fbank[:, 0:target_length]
# Convert to [1, mel_bins, num_frames] shape, essentially like a 1
# channel image
fbank = fbank.unsqueeze(0)
return fbank
def get_clip_timepoints(clip_sampler, duration):
# Read out all clips in this video
all_clips_timepoints = []
is_last_clip = False
end = 0.0
while not is_last_clip:
start, end, _, _, is_last_clip = clip_sampler(end, duration, annotation=None)
all_clips_timepoints.append((start, end))
return all_clips_timepoints
def load_and_transform_audio_data(
audio_paths,
device,
num_mel_bins=128,
target_length=204,
sample_rate=16000,
clip_duration=2,
clips_per_video=3,
mean=-4.268,
std=9.138,
):
if audio_paths is None:
return None
audio_outputs = []
clip_sampler = ConstantClipsPerVideoSampler(
clip_duration=clip_duration, clips_per_video=clips_per_video
)
    normalize = transforms.Normalize(mean=mean, std=std)  # hoisted out of the loop; it does not depend on the audio path
    for audio_path in audio_paths:
waveform, sr = torchaudio.load(audio_path)
if sample_rate != sr:
waveform = torchaudio.functional.resample(
waveform, orig_freq=sr, new_freq=sample_rate
)
all_clips_timepoints = get_clip_timepoints(
clip_sampler, waveform.size(1) / sample_rate
)
all_clips = []
for clip_timepoints in all_clips_timepoints:
waveform_clip = waveform[
:,
int(clip_timepoints[0] * sample_rate): int(
clip_timepoints[1] * sample_rate
),
]
waveform_melspec = waveform2melspec(
waveform_clip, sample_rate, num_mel_bins, target_length
)
all_clips.append(waveform_melspec)
        all_clips = [normalize(ac).to(device) for ac in all_clips]
all_clips = torch.stack(all_clips, dim=0)
audio_outputs.append(all_clips)
return torch.stack(audio_outputs, dim=0) | null |
15,629 | import logging
import math
import os
import torch
import torch.nn as nn
import torchaudio
from PIL import Image
from pytorchvideo import transforms as pv_transforms
from pytorchvideo.data.clip_sampling import ConstantClipsPerVideoSampler
from pytorchvideo.data.encoded_video import EncodedVideo
from torchvision import transforms
from torchvision.transforms._transforms_video import NormalizeVideo
from .models.multimodal_preprocessors import SimpleTokenizer
def crop_boxes(boxes, x_offset, y_offset):
"""
    Perform crop on the bounding boxes given the offsets.
Args:
        boxes (ndarray or None): bounding boxes to perform crop. The dimension
is `num boxes` x 4.
x_offset (int): cropping offset in the x axis.
y_offset (int): cropping offset in the y axis.
Returns:
cropped_boxes (ndarray or None): the cropped boxes with dimension of
`num boxes` x 4.
"""
cropped_boxes = boxes.copy()
cropped_boxes[:, [0, 2]] = boxes[:, [0, 2]] - x_offset
cropped_boxes[:, [1, 3]] = boxes[:, [1, 3]] - y_offset
return cropped_boxes
The provided code snippet includes necessary dependencies for implementing the `uniform_crop` function. Write a Python function `def uniform_crop(images, size, spatial_idx, boxes=None, scale_size=None)` to solve the following problem:
Perform uniform spatial sampling on the images and corresponding boxes. Args: images (tensor): images to perform uniform crop. The dimension is `num frames` x `channel` x `height` x `width`. size (int): size of height and width to crop the images. spatial_idx (int): 0, 1, or 2 for left, center, and right crop if width is larger than height. Or 0, 1, or 2 for top, center, and bottom crop if height is larger than width. boxes (ndarray or None): optional. Corresponding boxes to images. Dimension is `num boxes` x 4. scale_size (int): optional. If not None, resize the images to scale_size before performing any crop. Returns: cropped (tensor): images with dimension of `num frames` x `channel` x `size` x `size`. cropped_boxes (ndarray or None): the cropped boxes with dimension of `num boxes` x 4.
Here is the function:
def uniform_crop(images, size, spatial_idx, boxes=None, scale_size=None):
"""
Perform uniform spatial sampling on the images and corresponding boxes.
Args:
images (tensor): images to perform uniform crop. The dimension is
`num frames` x `channel` x `height` x `width`.
        size (int): size of height and width to crop the images.
spatial_idx (int): 0, 1, or 2 for left, center, and right crop if width
is larger than height. Or 0, 1, or 2 for top, center, and bottom
crop if height is larger than width.
boxes (ndarray or None): optional. Corresponding boxes to images.
Dimension is `num boxes` x 4.
        scale_size (int): optional. If not None, resize the images to scale_size before
performing any crop.
Returns:
cropped (tensor): images with dimension of
`num frames` x `channel` x `size` x `size`.
cropped_boxes (ndarray or None): the cropped boxes with dimension of
`num boxes` x 4.
"""
assert spatial_idx in [0, 1, 2]
ndim = len(images.shape)
if ndim == 3:
images = images.unsqueeze(0)
height = images.shape[2]
width = images.shape[3]
if scale_size is not None:
if width <= height:
width, height = scale_size, int(height / width * scale_size)
else:
width, height = int(width / height * scale_size), scale_size
images = torch.nn.functional.interpolate(
images,
size=(height, width),
mode="bilinear",
align_corners=False,
)
y_offset = int(math.ceil((height - size) / 2))
x_offset = int(math.ceil((width - size) / 2))
if height > width:
if spatial_idx == 0:
y_offset = 0
elif spatial_idx == 2:
y_offset = height - size
else:
if spatial_idx == 0:
x_offset = 0
elif spatial_idx == 2:
x_offset = width - size
cropped = images[:, :, y_offset: y_offset + size, x_offset: x_offset + size]
cropped_boxes = crop_boxes(boxes, x_offset, y_offset) if boxes is not None else None
if ndim == 3:
cropped = cropped.squeeze(0)
    return cropped, cropped_boxes | Perform uniform spatial sampling on the images and corresponding boxes. Args: images (tensor): images to perform uniform crop. The dimension is `num frames` x `channel` x `height` x `width`. size (int): size of height and width to crop the images. spatial_idx (int): 0, 1, or 2 for left, center, and right crop if width is larger than height. Or 0, 1, or 2 for top, center, and bottom crop if height is larger than width. boxes (ndarray or None): optional. Corresponding boxes to images. Dimension is `num boxes` x 4. scale_size (int): optional. If not None, resize the images to scale_size before performing any crop. Returns: cropped (tensor): images with dimension of `num frames` x `channel` x `size` x `size`. cropped_boxes (ndarray or None): the cropped boxes with dimension of `num boxes` x 4.
15,630 | import logging
import math
import os
import torch
import torch.nn as nn
import torchaudio
from PIL import Image
from pytorchvideo import transforms as pv_transforms
from pytorchvideo.data.clip_sampling import ConstantClipsPerVideoSampler
from pytorchvideo.data.encoded_video import EncodedVideo
from torchvision import transforms
from torchvision.transforms._transforms_video import NormalizeVideo
from .models.multimodal_preprocessors import SimpleTokenizer
def get_clip_timepoints(clip_sampler, duration):
# Read out all clips in this video
all_clips_timepoints = []
is_last_clip = False
end = 0.0
while not is_last_clip:
start, end, _, _, is_last_clip = clip_sampler(end, duration, annotation=None)
all_clips_timepoints.append((start, end))
return all_clips_timepoints
class SpatialCrop(nn.Module):
"""
Convert the video into 3 smaller clips spatially. Must be used after the
temporal crops to get spatial crops, and should be used with
-2 in the spatial crop at the slowfast augmentation stage (so full
frames are passed in here). Will return a larger list with the
3x spatial crops as well.
"""
def __init__(self, crop_size: int = 224, num_crops: int = 3):
super().__init__()
self.crop_size = crop_size
if num_crops == 3:
self.crops_to_ext = [0, 1, 2]
self.flipped_crops_to_ext = []
elif num_crops == 1:
self.crops_to_ext = [1]
self.flipped_crops_to_ext = []
else:
raise NotImplementedError("Nothing else supported yet")
def forward(self, videos):
"""
Args:
videos: A list of C, T, H, W videos.
Returns:
videos: A list with 3x the number of elements. Each video converted
to C, T, H', W' by spatial cropping.
"""
assert isinstance(videos, list), "Must be a list of videos after temporal crops"
assert all([video.ndim == 4 for video in videos]), "Must be (C,T,H,W)"
res = []
for video in videos:
for spatial_idx in self.crops_to_ext:
res.append(uniform_crop(video, self.crop_size, spatial_idx)[0])
if not self.flipped_crops_to_ext:
continue
flipped_video = transforms.functional.hflip(video)
for spatial_idx in self.flipped_crops_to_ext:
res.append(uniform_crop(flipped_video, self.crop_size, spatial_idx)[0])
return res
def load_and_transform_video_data(
video_paths,
device,
clip_duration=2,
clips_per_video=5,
sample_rate=16000,
):
if video_paths is None:
return None
video_outputs = []
video_transform = transforms.Compose(
[
pv_transforms.ShortSideScale(224),
NormalizeVideo(
mean=(0.48145466, 0.4578275, 0.40821073),
std=(0.26862954, 0.26130258, 0.27577711),
),
]
)
clip_sampler = ConstantClipsPerVideoSampler(
clip_duration=clip_duration, clips_per_video=clips_per_video
)
frame_sampler = pv_transforms.UniformTemporalSubsample(num_samples=clip_duration)
for video_path in video_paths:
video = EncodedVideo.from_path(
video_path,
decoder="decord",
decode_audio=False,
**{"sample_rate": sample_rate},
)
all_clips_timepoints = get_clip_timepoints(clip_sampler, video.duration)
all_video = []
for clip_timepoints in all_clips_timepoints:
# Read the clip, get frames
clip = video.get_clip(clip_timepoints[0], clip_timepoints[1])
if clip is None:
raise ValueError("No clip found")
video_clip = frame_sampler(clip["video"])
            video_clip = video_clip / 255.0  # decord returns uint8 frames in [0, 255]; scale to [0, 1] floats
all_video.append(video_clip)
all_video = [video_transform(clip) for clip in all_video]
all_video = SpatialCrop(224, num_crops=3)(all_video)
all_video = torch.stack(all_video, dim=0)
video_outputs.append(all_video)
return torch.stack(video_outputs, dim=0).to(device) | null |
15,631 | import os
import urllib
from functools import partial
from types import SimpleNamespace
import torch
import torch.nn as nn
from .helpers import (
EinOpsRearrange,
LearnableLogitScaling,
Normalize,
SelectElement,
SelectEOSAndProject,
)
from .multimodal_preprocessors import (
AudioPreprocessor,
IMUPreprocessor,
PadIm2Video,
PatchEmbedGeneric,
RGBDTPreprocessor,
SpatioTemporalPosEmbeddingHelper,
TextPreprocessor,
ThermalPreprocessor,
)
from .transformer import MultiheadAttention, SimpleTransformer
class ImageBindModel(nn.Module):
def __init__(
self,
video_frames=2,
kernel_size=(2, 14, 14),
audio_kernel_size=16,
audio_stride=10,
out_embed_dim=768,
vision_embed_dim=1024,
vision_num_blocks=24,
vision_num_heads=16,
audio_embed_dim=768,
audio_num_blocks=12,
audio_num_heads=12,
audio_num_mel_bins=128,
audio_target_len=204,
audio_drop_path=0.1,
text_embed_dim=768,
text_num_blocks=12,
text_num_heads=12,
depth_embed_dim=384,
depth_kernel_size=16,
depth_num_blocks=12,
depth_num_heads=8,
depth_drop_path=0.0,
thermal_embed_dim=768,
thermal_kernel_size=16,
thermal_num_blocks=12,
thermal_num_heads=12,
thermal_drop_path=0.0,
imu_embed_dim=512,
imu_kernel_size=8,
imu_num_blocks=6,
imu_num_heads=8,
imu_drop_path=0.7,
):
def _create_modality_preprocessors(
self,
video_frames=2,
vision_embed_dim=1024,
kernel_size=(2, 14, 14),
text_embed_dim=768,
audio_embed_dim=768,
audio_kernel_size=16,
audio_stride=10,
audio_num_mel_bins=128,
audio_target_len=204,
depth_embed_dim=768,
depth_kernel_size=16,
thermal_embed_dim=768,
thermal_kernel_size=16,
imu_embed_dim=512,
):
def _create_modality_trunks(
self,
vision_embed_dim=1024,
vision_num_blocks=24,
vision_num_heads=16,
text_embed_dim=768,
text_num_blocks=12,
text_num_heads=12,
audio_embed_dim=768,
audio_num_blocks=12,
audio_num_heads=12,
audio_drop_path=0.0,
depth_embed_dim=768,
depth_num_blocks=12,
depth_num_heads=12,
depth_drop_path=0.0,
thermal_embed_dim=768,
thermal_num_blocks=12,
thermal_num_heads=12,
thermal_drop_path=0.0,
imu_embed_dim=512,
imu_num_blocks=6,
imu_num_heads=8,
imu_drop_path=0.7,
):
def instantiate_trunk(
embed_dim, num_blocks, num_heads, pre_transformer_ln, add_bias_kv, drop_path
):
def _create_modality_heads(
self,
out_embed_dim,
vision_embed_dim,
text_embed_dim,
audio_embed_dim,
depth_embed_dim,
thermal_embed_dim,
imu_embed_dim,
):
def _create_modality_postprocessors(self, out_embed_dim):
def forward(self, inputs, normalize=True):
def imagebind_huge(pretrained=False):
model = ImageBindModel(
vision_embed_dim=1280,
vision_num_blocks=32,
vision_num_heads=16,
text_embed_dim=1024,
text_num_blocks=24,
text_num_heads=16,
out_embed_dim=1024,
audio_drop_path=0.1,
imu_drop_path=0.7,
)
if pretrained:
if not os.path.exists("checkpoints/imagebind_huge.pth"):
print(
"Downloading imagebind weights to .checkpoints/imagebind_huge.pth ..."
)
os.makedirs("checkpoints", exist_ok=True)
torch.hub.download_url_to_file(
"https://dl.fbaipublicfiles.com/imagebind/imagebind_huge.pth",
"checkpoints/imagebind_huge.pth",
progress=True,
)
model.load_state_dict(torch.load("checkpoints/imagebind_huge.pth"))
return model | null |
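A hypothetical call site for the factory; the device placement and the commented input dict are illustrative assumptions:
# Hypothetical usage: instantiate ImageBind-Huge and run it in eval mode.
model = imagebind_huge(pretrained=True)
model.eval()
model.to("cuda" if torch.cuda.is_available() else "cpu")
# embeddings = model({"vision": vision_input, "text": text_tokens})  # dict of per-modality embeddings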
15,632 | import gzip
import html
import io
import math
from functools import lru_cache
from typing import Callable, List, Optional
import ftfy
import numpy as np
import regex as re
import torch
import torch.nn as nn
from iopath.common.file_io import g_pathmgr
from timm.models.layers import trunc_normal_
from .helpers import cast_if_src_dtype, VerboseNNModule
The provided code snippet includes necessary dependencies for implementing the `get_sinusoid_encoding_table` function. Write a Python function `def get_sinusoid_encoding_table(n_position, d_hid)` to solve the following problem:
Sinusoid position encoding table
Here is the function:
def get_sinusoid_encoding_table(n_position, d_hid):
"""Sinusoid position encoding table"""
# TODO: make it with torch instead of numpy
def get_position_angle_vec(position):
return [
position / np.power(10000, 2 * (hid_j // 2) / d_hid)
for hid_j in range(d_hid)
]
sinusoid_table = np.array(
[get_position_angle_vec(pos_i) for pos_i in range(n_position)]
)
sinusoid_table[:, 0::2] = np.sin(sinusoid_table[:, 0::2]) # dim 2i
sinusoid_table[:, 1::2] = np.cos(sinusoid_table[:, 1::2]) # dim 2i+1
return torch.FloatTensor(sinusoid_table).unsqueeze(0) | Sinusoid position encoding table |
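A quick, purely illustrative shape check:
# 197 positions (1 CLS token + 14x14 patches) embedded at dimension 768.
pe = get_sinusoid_encoding_table(n_position=197, d_hid=768)
print(pe.shape)  # torch.Size([1, 197, 768])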
15,633 | import gzip
import html
import io
import math
from functools import lru_cache
from typing import Callable, List, Optional
import ftfy
import numpy as np
import regex as re
import torch
import torch.nn as nn
from iopath.common.file_io import g_pathmgr
from timm.models.layers import trunc_normal_
from .helpers import cast_if_src_dtype, VerboseNNModule
def interpolate_pos_encoding(
npatch_per_img,
pos_embed,
patches_layout,
input_shape=None,
first_patch_idx=1,
):
assert first_patch_idx == 0 or first_patch_idx == 1, "there is 1 CLS token or none"
N = pos_embed.shape[1] - first_patch_idx # since it's 1 if cls_token exists
if npatch_per_img == N:
return pos_embed
assert (
patches_layout[-1] == patches_layout[-2]
), "Interpolation of pos embed not supported for non-square layouts"
class_emb = pos_embed[:, :first_patch_idx]
pos_embed = pos_embed[:, first_patch_idx:]
if input_shape is None or patches_layout[0] == 1:
# simple 2D pos embedding, no temporal component
pos_embed = interpolate_pos_encoding_2d(npatch_per_img, pos_embed)
elif patches_layout[0] > 1:
# pos embed has a temporal component
assert len(input_shape) == 4, "temporal interpolation not supported"
# we only support 2D interpolation in this case
num_frames = patches_layout[0]
num_spatial_tokens = patches_layout[1] * patches_layout[2]
pos_embed = pos_embed.view(1, num_frames, num_spatial_tokens, -1)
# interpolate embedding for zeroth frame
pos_embed = interpolate_pos_encoding_2d(
npatch_per_img, pos_embed[0, 0, ...].unsqueeze(0)
)
else:
raise ValueError("This type of interpolation isn't implemented")
return torch.cat((class_emb, pos_embed), dim=1)
def _get_pos_embedding(
npatch_per_img,
pos_embed,
patches_layout,
input_shape,
first_patch_idx=1,
):
pos_embed = interpolate_pos_encoding(
npatch_per_img,
pos_embed,
patches_layout,
input_shape=input_shape,
first_patch_idx=first_patch_idx,
)
return pos_embed | null |
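`interpolate_pos_encoding_2d` is referenced above but not defined in this snippet; a minimal sketch of the usual bicubic resize of a square grid of position embeddings (DINO-style; the real implementation may additionally handle dtype casting):
def interpolate_pos_encoding_2d(target_spatial_size, pos_embed):
    # pos_embed: (1, N, dim) over a square sqrt(N) x sqrt(N) grid of patches
    N = pos_embed.shape[1]
    if N == target_spatial_size:
        return pos_embed
    dim = pos_embed.shape[-1]
    side = int(math.sqrt(N))
    pos_embed = nn.functional.interpolate(
        pos_embed.reshape(1, side, side, dim).permute(0, 3, 1, 2),
        scale_factor=math.sqrt(target_spatial_size / N),
        mode="bicubic",
        align_corners=False,
    )
    return pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)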
15,634 | import gzip
import html
import io
import math
from functools import lru_cache
from typing import Callable, List, Optional
import ftfy
import numpy as np
import regex as re
import torch
import torch.nn as nn
from iopath.common.file_io import g_pathmgr
from timm.models.layers import trunc_normal_
from .helpers import cast_if_src_dtype, VerboseNNModule
def build_causal_attention_mask(context_length):
# lazily create causal attention mask, with full attention between the vision tokens
# pytorch uses additive attention mask; fill with -inf
mask = torch.empty(context_length, context_length, requires_grad=False)
mask.fill_(float("-inf"))
mask.triu_(1) # zero out the lower diagonal
return mask | null |
15,635 | import gzip
import html
import io
import math
from functools import lru_cache
from typing import Callable, List, Optional
import ftfy
import numpy as np
import regex as re
import torch
import torch.nn as nn
from iopath.common.file_io import g_pathmgr
from timm.models.layers import trunc_normal_
from .helpers import cast_if_src_dtype, VerboseNNModule
The provided code snippet includes necessary dependencies for implementing the `bytes_to_unicode` function. Write a Python function `def bytes_to_unicode()` to solve the following problem:
Returns list of utf-8 byte and a corresponding list of unicode strings. The reversible bpe codes work on unicode strings. This means you need a large # of unicode characters in your vocab if you want to avoid UNKs. When you're at something like a 10B token dataset you end up needing around 5K for decent coverage. This is a significant percentage of your normal, say, 32K bpe vocab. To avoid that, we want lookup tables between utf-8 bytes and unicode strings. And avoids mapping to whitespace/control characters the bpe code barfs on.
Here is the function:
def bytes_to_unicode():
"""
Returns list of utf-8 byte and a corresponding list of unicode strings.
The reversible bpe codes work on unicode strings.
This means you need a large # of unicode characters in your vocab if you want to avoid UNKs.
When you're at something like a 10B token dataset you end up needing around 5K for decent coverage.
    This is a significant percentage of your normal, say, 32K bpe vocab.
To avoid that, we want lookup tables between utf-8 bytes and unicode strings.
And avoids mapping to whitespace/control characters the bpe code barfs on.
"""
bs = (
list(range(ord("!"), ord("~") + 1))
+ list(range(ord("¡"), ord("¬") + 1))
+ list(range(ord("®"), ord("ÿ") + 1))
)
cs = bs[:]
n = 0
for b in range(2**8):
if b not in bs:
bs.append(b)
cs.append(2**8 + n)
n += 1
cs = [chr(n) for n in cs]
    return dict(zip(bs, cs)) | Returns list of utf-8 byte and a corresponding list of unicode strings. The reversible bpe codes work on unicode strings. This means you need a large # of unicode characters in your vocab if you want to avoid UNKs. When you're at something like a 10B token dataset you end up needing around 5K for decent coverage. This is a significant percentage of your normal, say, 32K bpe vocab. To avoid that, we want lookup tables between utf-8 bytes and unicode strings. And avoids mapping to whitespace/control characters the bpe code barfs on.
15,636 | import gzip
import html
import io
import math
from functools import lru_cache
from typing import Callable, List, Optional
import ftfy
import numpy as np
import regex as re
import torch
import torch.nn as nn
from iopath.common.file_io import g_pathmgr
from timm.models.layers import trunc_normal_
from .helpers import cast_if_src_dtype, VerboseNNModule
The provided code snippet includes necessary dependencies for implementing the `get_pairs` function. Write a Python function `def get_pairs(word)` to solve the following problem:
Return set of symbol pairs in a word. Word is represented as tuple of symbols (symbols being variable-length strings).
Here is the function:
def get_pairs(word):
"""Return set of symbol pairs in a word.
Word is represented as tuple of symbols (symbols being variable-length strings).
"""
pairs = set()
prev_char = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
prev_char = char
return pairs | Return set of symbol pairs in a word. Word is represented as tuple of symbols (symbols being variable-length strings). |
15,637 | import gzip
import html
import io
import math
from functools import lru_cache
from typing import Callable, List, Optional
import ftfy
import numpy as np
import regex as re
import torch
import torch.nn as nn
from iopath.common.file_io import g_pathmgr
from timm.models.layers import trunc_normal_
from .helpers import cast_if_src_dtype, VerboseNNModule
def basic_clean(text):
text = ftfy.fix_text(text)
text = html.unescape(html.unescape(text))
return text.strip() | null |
15,638 | import gzip
import html
import io
import math
from functools import lru_cache
from typing import Callable, List, Optional
import ftfy
import numpy as np
import regex as re
import torch
import torch.nn as nn
from iopath.common.file_io import g_pathmgr
from timm.models.layers import trunc_normal_
from .helpers import cast_if_src_dtype, VerboseNNModule
def whitespace_clean(text):
text = re.sub(r"\s+", " ", text)
text = text.strip()
return text | null |
15,639 | import os
import sys
import torch
from omegaconf import OmegaConf
import numpy as np
from .ldm.models.diffusion.ddim import DDIMSampler
from .ldm.util import instantiate_from_config
def make_batch(image, mask, device):
image = image.astype(np.float32) / 255.0
image = image[None].transpose(0, 3, 1, 2)
image = torch.from_numpy(image)
mask = mask.astype(np.float32) / 255.0
mask = mask[None, None]
mask[mask < 0.5] = 0
mask[mask >= 0.5] = 1
mask = torch.from_numpy(mask)
masked_image = (1 - mask) * image
batch = {"image": image, "mask": mask, "masked_image": masked_image}
for k in batch:
batch[k] = batch[k].to(device=device)
batch[k] = batch[k] * 2.0 - 1.0
return batch | null |
15,640 | import os
import math
import torch
import torch.nn as nn
import numpy as np
from einops import repeat
from ...util import instantiate_from_config
def make_beta_schedule(schedule, n_timestep, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):
if schedule == "linear":
betas = (
torch.linspace(linear_start ** 0.5, linear_end ** 0.5, n_timestep, dtype=torch.float64) ** 2
)
elif schedule == "cosine":
timesteps = (
torch.arange(n_timestep + 1, dtype=torch.float64) / n_timestep + cosine_s
)
alphas = timesteps / (1 + cosine_s) * np.pi / 2
alphas = torch.cos(alphas).pow(2)
alphas = alphas / alphas[0]
betas = 1 - alphas[1:] / alphas[:-1]
        betas = torch.clamp(betas, min=0, max=0.999)  # keep betas a torch tensor so `betas.numpy()` below succeeds
elif schedule == "sqrt_linear":
betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64)
elif schedule == "sqrt":
betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64) ** 0.5
else:
raise ValueError(f"schedule '{schedule}' unknown.")
return betas.numpy() | null |
15,641 | import os
import math
import torch
import torch.nn as nn
import numpy as np
from einops import repeat
from ...util import instantiate_from_config
def make_ddim_timesteps(ddim_discr_method, num_ddim_timesteps, num_ddpm_timesteps, verbose=True):
if ddim_discr_method == 'uniform':
c = num_ddpm_timesteps // num_ddim_timesteps
ddim_timesteps = np.asarray(list(range(0, num_ddpm_timesteps, c)))
elif ddim_discr_method == 'quad':
ddim_timesteps = ((np.linspace(0, np.sqrt(num_ddpm_timesteps * .8), num_ddim_timesteps)) ** 2).astype(int)
else:
raise NotImplementedError(f'There is no ddim discretization method called "{ddim_discr_method}"')
# assert ddim_timesteps.shape[0] == num_ddim_timesteps
# add one to get the final alpha values right (the ones from first scale to data during sampling)
steps_out = ddim_timesteps + 1
if verbose:
print(f'Selected timesteps for ddim sampler: {steps_out}')
return steps_out | null |
15,642 | import os
import math
import torch
import torch.nn as nn
import numpy as np
from einops import repeat
from ...util import instantiate_from_config
def make_ddim_sampling_parameters(alphacums, ddim_timesteps, eta, verbose=True):
# select alphas for computing the variance schedule
alphas = alphacums[ddim_timesteps]
alphas_prev = np.asarray([alphacums[0]] + alphacums[ddim_timesteps[:-1]].tolist())
    # according to the formula provided in https://arxiv.org/abs/2010.02502
sigmas = eta * np.sqrt((1 - alphas_prev) / (1 - alphas) * (1 - alphas / alphas_prev))
if verbose:
print(f'Selected alphas for ddim sampler: a_t: {alphas}; a_(t-1): {alphas_prev}')
print(f'For the chosen value of eta, which is {eta}, '
f'this results in the following sigma_t schedule for ddim sampler {sigmas}')
return sigmas, alphas, alphas_prev | null |
15,643 | import os
import math
import torch
import torch.nn as nn
import numpy as np
from einops import repeat
from ...util import instantiate_from_config
The provided code snippet includes necessary dependencies for implementing the `betas_for_alpha_bar` function. Write a Python function `def betas_for_alpha_bar(num_diffusion_timesteps, alpha_bar, max_beta=0.999)` to solve the following problem:
Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of (1-beta) over time from t = [0,1]. :param num_diffusion_timesteps: the number of betas to produce. :param alpha_bar: a lambda that takes an argument t from 0 to 1 and produces the cumulative product of (1-beta) up to that part of the diffusion process. :param max_beta: the maximum beta to use; use values lower than 1 to prevent singularities.
Here is the function:
def betas_for_alpha_bar(num_diffusion_timesteps, alpha_bar, max_beta=0.999):
"""
Create a beta schedule that discretizes the given alpha_t_bar function,
which defines the cumulative product of (1-beta) over time from t = [0,1].
:param num_diffusion_timesteps: the number of betas to produce.
:param alpha_bar: a lambda that takes an argument t from 0 to 1 and
produces the cumulative product of (1-beta) up to that
part of the diffusion process.
:param max_beta: the maximum beta to use; use values lower than 1 to
prevent singularities.
"""
betas = []
for i in range(num_diffusion_timesteps):
t1 = i / num_diffusion_timesteps
t2 = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
return np.array(betas) | Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of (1-beta) over time from t = [0,1]. :param num_diffusion_timesteps: the number of betas to produce. :param alpha_bar: a lambda that takes an argument t from 0 to 1 and produces the cumulative product of (1-beta) up to that part of the diffusion process. :param max_beta: the maximum beta to use; use values lower than 1 to prevent singularities. |
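For example, the cosine schedule of Nichol & Dhariwal (2021) comes from passing a squared-cosine `alpha_bar`; an illustrative call:
# Cosine schedule: alpha_bar(t) = cos((t + 0.008) / 1.008 * pi / 2) ** 2
betas = betas_for_alpha_bar(
    1000, lambda t: math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2
)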
15,644 | import os
import math
import torch
import torch.nn as nn
import numpy as np
from einops import repeat
from ...util import instantiate_from_config
def extract_into_tensor(a, t, x_shape):
b, *_ = t.shape
out = a.gather(-1, t)
return out.reshape(b, *((1,) * (len(x_shape) - 1))) | null |
15,645 | import os
import math
import torch
import torch.nn as nn
import numpy as np
from einops import repeat
from ...util import instantiate_from_config
class CheckpointFunction(torch.autograd.Function):
    @staticmethod
    def forward(ctx, run_function, length, *args):
ctx.run_function = run_function
ctx.input_tensors = list(args[:length])
ctx.input_params = list(args[length:])
with torch.no_grad():
output_tensors = ctx.run_function(*ctx.input_tensors)
return output_tensors
    @staticmethod
    def backward(ctx, *output_grads):
ctx.input_tensors = [x.detach().requires_grad_(True) for x in ctx.input_tensors]
with torch.enable_grad():
# Fixes a bug where the first op in run_function modifies the
# Tensor storage in place, which is not allowed for detach()'d
# Tensors.
shallow_copies = [x.view_as(x) for x in ctx.input_tensors]
output_tensors = ctx.run_function(*shallow_copies)
input_grads = torch.autograd.grad(
output_tensors,
ctx.input_tensors + ctx.input_params,
output_grads,
allow_unused=True,
)
del ctx.input_tensors
del ctx.input_params
del output_tensors
return (None, None) + input_grads
The provided code snippet includes necessary dependencies for implementing the `checkpoint` function. Write a Python function `def checkpoint(func, inputs, params, flag)` to solve the following problem:
Evaluate a function without caching intermediate activations, allowing for reduced memory at the expense of extra compute in the backward pass. :param func: the function to evaluate. :param inputs: the argument sequence to pass to `func`. :param params: a sequence of parameters `func` depends on but does not explicitly take as arguments. :param flag: if False, disable gradient checkpointing.
Here is the function:
def checkpoint(func, inputs, params, flag):
"""
Evaluate a function without caching intermediate activations, allowing for
reduced memory at the expense of extra compute in the backward pass.
:param func: the function to evaluate.
:param inputs: the argument sequence to pass to `func`.
:param params: a sequence of parameters `func` depends on but does not
explicitly take as arguments.
:param flag: if False, disable gradient checkpointing.
"""
if flag:
args = tuple(inputs) + tuple(params)
return CheckpointFunction.apply(func, len(inputs), *args)
else:
return func(*inputs) | Evaluate a function without caching intermediate activations, allowing for reduced memory at the expense of extra compute in the backward pass. :param func: the function to evaluate. :param inputs: the argument sequence to pass to `func`. :param params: a sequence of parameters `func` depends on but does not explicitly take as arguments. :param flag: if False, disable gradient checkpointing. |
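A self-contained, hypothetical call; the module and shapes are illustrative:
# Checkpoint a small MLP: activations are recomputed during the backward pass.
mlp = nn.Sequential(nn.Linear(64, 64), nn.GELU(), nn.Linear(64, 64))
x = torch.randn(8, 64, requires_grad=True)
y = checkpoint(lambda inp: mlp(inp), (x,), tuple(mlp.parameters()), flag=True)
y.sum().backward()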
15,646 | import os
import math
import torch
import torch.nn as nn
import numpy as np
from einops import repeat
from ...util import instantiate_from_config
The provided code snippet includes necessary dependencies for implementing the `timestep_embedding` function. Write a Python function `def timestep_embedding(timesteps, dim, max_period=10000, repeat_only=False)` to solve the following problem:
Create sinusoidal timestep embeddings. :param timesteps: a 1-D Tensor of N indices, one per batch element. These may be fractional. :param dim: the dimension of the output. :param max_period: controls the minimum frequency of the embeddings. :return: an [N x dim] Tensor of positional embeddings.
Here is the function:
def timestep_embedding(timesteps, dim, max_period=10000, repeat_only=False):
"""
Create sinusoidal timestep embeddings.
:param timesteps: a 1-D Tensor of N indices, one per batch element.
These may be fractional.
:param dim: the dimension of the output.
:param max_period: controls the minimum frequency of the embeddings.
:return: an [N x dim] Tensor of positional embeddings.
"""
if not repeat_only:
half = dim // 2
freqs = torch.exp(
-math.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32) / half
).to(device=timesteps.device)
args = timesteps[:, None].float() * freqs[None]
embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)
if dim % 2:
embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1)
else:
embedding = repeat(timesteps, 'b -> b d', d=dim)
return embedding | Create sinusoidal timestep embeddings. :param timesteps: a 1-D Tensor of N indices, one per batch element. These may be fractional. :param dim: the dimension of the output. :param max_period: controls the minimum frequency of the embeddings. :return: an [N x dim] Tensor of positional embeddings. |
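For shape intuition, a small sketch with arbitrary values:
import torch

t = torch.tensor([0.0, 10.0, 999.5])    # fractional timesteps are allowed
emb = timestep_embedding(t, dim=128)
assert emb.shape == (3, 128)             # each row is [cos terms | sin terms]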
15,647 | import os
import math
import torch
import torch.nn as nn
import numpy as np
from einops import repeat
from ...util import instantiate_from_config
The provided code snippet includes necessary dependencies for implementing the `zero_module` function. Write a Python function `def zero_module(module)` to solve the following problem:
Zero out the parameters of a module and return it.
Here is the function:
def zero_module(module):
"""
Zero out the parameters of a module and return it.
"""
for p in module.parameters():
p.detach().zero_()
return module | Zero out the parameters of a module and return it. |
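A typical use, sketched under assumed shapes: zero-initializing the last layer of a residual branch so the block starts as an identity mapping.
import torch.nn as nn

proj = zero_module(nn.Conv2d(64, 64, 3, padding=1))
assert all((p == 0).all() for p in proj.parameters())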
15,648 | import os
import math
import torch
import torch.nn as nn
import numpy as np
from einops import repeat
from ...util import instantiate_from_config
The provided code snippet includes necessary dependencies for implementing the `scale_module` function. Write a Python function `def scale_module(module, scale)` to solve the following problem:
Scale the parameters of a module and return it.
Here is the function:
def scale_module(module, scale):
"""
Scale the parameters of a module and return it.
"""
for p in module.parameters():
p.detach().mul_(scale)
return module | Scale the parameters of a module and return it. |
15,649 | import os
import math
import torch
import torch.nn as nn
import numpy as np
from einops import repeat
from ...util import instantiate_from_config
The provided code snippet includes necessary dependencies for implementing the `mean_flat` function. Write a Python function `def mean_flat(tensor)` to solve the following problem:
Take the mean over all non-batch dimensions.
Here is the function:
def mean_flat(tensor):
"""
Take the mean over all non-batch dimensions.
"""
return tensor.mean(dim=list(range(1, len(tensor.shape)))) | Take the mean over all non-batch dimensions. |
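A quick sketch (assumed shapes): reducing a per-pixel squared error to one scalar per batch element.
import torch

x, y = torch.randn(8, 3, 32, 32), torch.randn(8, 3, 32, 32)
per_sample = mean_flat((x - y) ** 2)
assert per_sample.shape == (8,)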
15,650 | import os
import math
import torch
import torch.nn as nn
import numpy as np
from einops import repeat
from ...util import instantiate_from_config
class GroupNorm32(nn.GroupNorm):
def forward(self, x):
return super().forward(x.float()).type(x.dtype)
The provided code snippet includes necessary dependencies for implementing the `normalization` function. Write a Python function `def normalization(channels)` to solve the following problem:
Make a standard normalization layer. :param channels: number of input channels. :return: an nn.Module for normalization.
Here is the function:
def normalization(channels):
"""
Make a standard normalization layer.
:param channels: number of input channels.
:return: an nn.Module for normalization.
"""
return GroupNorm32(32, channels) | Make a standard normalization layer. :param channels: number of input channels. :return: an nn.Module for normalization. |
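A small sketch of the mixed-precision behavior (assumed shapes): GroupNorm32 computes in float32 and casts back to the input dtype.
import torch

norm = normalization(64)                 # 32 groups over 64 channels
x = torch.randn(2, 64, 8, 8, dtype=torch.float16)
y = norm(x)
assert y.dtype == torch.float16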
15,651 | import os
import math
import torch
import torch.nn as nn
import numpy as np
from einops import repeat
from ...util import instantiate_from_config
The provided code snippet includes necessary dependencies for implementing the `conv_nd` function. Write a Python function `def conv_nd(dims, *args, **kwargs)` to solve the following problem:
Create a 1D, 2D, or 3D convolution module.
Here is the function:
def conv_nd(dims, *args, **kwargs):
"""
Create a 1D, 2D, or 3D convolution module.
"""
if dims == 1:
return nn.Conv1d(*args, **kwargs)
elif dims == 2:
return nn.Conv2d(*args, **kwargs)
elif dims == 3:
return nn.Conv3d(*args, **kwargs)
raise ValueError(f"unsupported dimensions: {dims}") | Create a 1D, 2D, or 3D convolution module. |
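Sketch: one call site can switch dimensionality via the `dims` flag (assumed shapes).
import torch

conv = conv_nd(2, 16, 32, 3, padding=1)  # dispatches to nn.Conv2d
assert conv(torch.randn(1, 16, 8, 8)).shape == (1, 32, 8, 8)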
15,652 | import os
import math
import torch
import torch.nn as nn
import numpy as np
from einops import repeat
from ...util import instantiate_from_config
The provided code snippet includes necessary dependencies for implementing the `linear` function. Write a Python function `def linear(*args, **kwargs)` to solve the following problem:
Create a linear module.
Here is the function:
def linear(*args, **kwargs):
"""
Create a linear module.
"""
return nn.Linear(*args, **kwargs) | Create a linear module. |
15,653 | import os
import math
import torch
import torch.nn as nn
import numpy as np
from einops import repeat
from ...util import instantiate_from_config
The provided code snippet includes necessary dependencies for implementing the `avg_pool_nd` function. Write a Python function `def avg_pool_nd(dims, *args, **kwargs)` to solve the following problem:
Create a 1D, 2D, or 3D average pooling module.
Here is the function:
def avg_pool_nd(dims, *args, **kwargs):
"""
Create a 1D, 2D, or 3D average pooling module.
"""
if dims == 1:
return nn.AvgPool1d(*args, **kwargs)
elif dims == 2:
return nn.AvgPool2d(*args, **kwargs)
elif dims == 3:
return nn.AvgPool3d(*args, **kwargs)
raise ValueError(f"unsupported dimensions: {dims}") | Create a 1D, 2D, or 3D average pooling module. |
15,654 | import os
import math
import torch
import torch.nn as nn
import numpy as np
from einops import repeat
from ...util import instantiate_from_config
def noise_like(shape, device, repeat=False):
repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(shape[0], *((1,) * (len(shape) - 1)))
noise = lambda: torch.randn(shape, device=device)
return repeat_noise() if repeat else noise() | null |
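Sketch of the `repeat` flag: one noise draw is shared across the batch.
import torch

n = noise_like((4, 3, 8, 8), device='cpu', repeat=True)
assert torch.equal(n[0], n[1])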
15,663 | from inspect import isfunction
import math
import torch
import torch.nn.functional as F
from torch import nn, einsum
from einops import rearrange, repeat
from ldm.modules.diffusionmodules.util import checkpoint
def exists(val):
    return val is not None  # body elided in the snippet; restored from its use below
def default(val, d):
if exists(val):
return val
return d() if isfunction(d) else d | null |
15,685 | import os
import math
import random
import numpy as np
import torch
import cv2
from torchvision.utils import make_grid
from datetime import datetime
def imwrite(img, img_path):
    # Body elided in the snippet; a minimal sketch (assumed behavior):
    # ensure the target directory exists, then write with OpenCV.
    os.makedirs(os.path.dirname(os.path.abspath(img_path)), exist_ok=True)
    cv2.imwrite(img_path, img)
def imsave(img, img_path):
img = np.squeeze(img)
if img.ndim == 3:
img = img[:, :, [2, 1, 0]]
cv2.imwrite(img_path, img) | null |
15,733 | import importlib
import torch
import numpy as np
from collections import abc
from einops import rearrange
from functools import partial
import multiprocessing as mp
from threading import Thread
from queue import Queue
from inspect import isfunction
from PIL import Image, ImageDraw, ImageFont
def exists(x):
    return x is not None  # body elided in the snippet; restored from its use below
def default(val, d):
if exists(val):
return val
return d() if isfunction(d) else d | null |
15,736 | import importlib
import torch
import numpy as np
from collections import abc
from einops import rearrange
from functools import partial
import multiprocessing as mp
from threading import Thread
from queue import Queue
from inspect import isfunction
from PIL import Image, ImageDraw, ImageFont
def get_obj_from_str(string, reload=False):
module, cls = string.rsplit(".", 1)
if reload:
module_imp = importlib.import_module(module)
importlib.reload(module_imp)
return getattr(importlib.import_module(module, package=None), cls)
def instantiate_from_config(config):
    if "target" not in config:
if config == '__is_first_stage__':
return None
elif config == "__is_unconditional__":
return None
raise KeyError("Expected key `target` to instantiate.")
return get_obj_from_str(config["target"])(**config.get("params", dict())) | null |
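A sketch with an assumed config dict; `target` can be any importable dotted path.
cfg = {"target": "torch.nn.Linear", "params": {"in_features": 4, "out_features": 2}}
layer = instantiate_from_config(cfg)     # equivalent to torch.nn.Linear(4, 2)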
15,739 | import torch
import torch.nn as nn
import numpy as np
import pytorch_lightning as pl
from torch.optim.lr_scheduler import LambdaLR
from einops import rearrange, repeat
from contextlib import contextmanager
from functools import partial
from tqdm import tqdm
from torchvision.utils import make_grid
from pytorch_lightning.utilities.distributed import rank_zero_only
from ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config
from ldm.modules.ema import LitEma
from ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution
from ldm.models.autoencoder import VQModelInterface, IdentityFirstStage, AutoencoderKL
from ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like
from ldm.models.diffusion.ddim import DDIMSampler
The provided code snippet includes necessary dependencies for implementing the `disabled_train` function. Write a Python function `def disabled_train(self, mode=True)` to solve the following problem:
Overwrite model.train with this function to make sure train/eval mode does not change anymore.
Here is the function:
def disabled_train(self, mode=True):
"""Overwrite model.train with this function to make sure train/eval mode
does not change anymore."""
return self | Overwrite model.train with this function to make sure train/eval mode does not change anymore. |
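A sketch of the override; binding via `__get__` (an addition here, not from the source) supplies `self` explicitly when the function is assigned onto an instance.
import torch.nn as nn

frozen = nn.Linear(4, 4).eval()
frozen.train = disabled_train.__get__(frozen)  # bind so `self` is passed
frozen.train()                                 # no-op: mode can no longer change
assert frozen.training is False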
15,740 | import torch
import torch.nn as nn
import numpy as np
import pytorch_lightning as pl
from torch.optim.lr_scheduler import LambdaLR
from einops import rearrange, repeat
from contextlib import contextmanager
from functools import partial
from tqdm import tqdm
from torchvision.utils import make_grid
from pytorch_lightning.utilities.distributed import rank_zero_only
from ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config
from ldm.modules.ema import LitEma
from ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution
from ldm.models.autoencoder import VQModelInterface, IdentityFirstStage, AutoencoderKL
from ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like
from ldm.models.diffusion.ddim import DDIMSampler
def uniform_on_device(r1, r2, shape, device):
return (r1 - r2) * torch.rand(*shape, device=device) + r2 | null |
15,741 | import os
import torch
from PIL import Image
import random
import time
import numpy as np
import uuid
import cv2
import wget
from transformers import pipeline
from .utils import (cal_dilate_factor, dilate_mask, gen_new_name,
seed_everything, prompts, resize_800,
gen_new_seed, GLOBAL_SEED)
from diffusers import StableDiffusionPipeline, StableDiffusionInpaintPipeline, StableDiffusionInstructPix2PixPipeline
from diffusers import EulerAncestralDiscreteScheduler
from diffusers import StableDiffusionControlNetPipeline, ControlNetModel, UniPCMultistepScheduler
from controlnet_aux import OpenposeDetector, MLSDdetector, HEDdetector
from segment_anything.utils.amg import remove_small_regions
from segment_anything import build_sam, sam_model_registry, SamAutomaticMaskGenerator
from .sam_preditor import SamPredictor
import easyocr
def HWC3(x):
assert x.dtype == np.uint8
if x.ndim == 2:
x = x[:, :, None]
assert x.ndim == 3
H, W, C = x.shape
assert C == 1 or C == 3 or C == 4
if C == 3:
return x
if C == 1:
return np.concatenate([x, x, x], axis=2)
if C == 4:
color = x[:, :, 0:3].astype(np.float32)
alpha = x[:, :, 3:4].astype(np.float32) / 255.0
y = color * alpha + 255.0 * (1.0 - alpha)
y = y.clip(0, 255).astype(np.uint8)
return y | null |
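Sketch of the four-channel path: alpha is composited over white and a uint8 RGB array comes back (input values are arbitrary).
import numpy as np

rgba = np.random.randint(0, 256, (64, 64, 4), dtype=np.uint8)
rgb = HWC3(rgba)
assert rgb.shape == (64, 64, 3) and rgb.dtype == np.uint8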
15,742 | import torch
import torch.nn as nn
import torch.nn.functional as F
from functools import partial
from timm.models.vision_transformer import _cfg, PatchEmbed
from timm.models.registry import register_model
from timm.models.layers import trunc_normal_, DropPath
from timm.models.helpers import named_apply, adapt_input_conv
from fairscale.nn.checkpoint.checkpoint_activations import checkpoint_wrapper
class VisionTransformer(nn.Module):
""" Vision Transformer
A PyTorch impl of : `An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale` -
https://arxiv.org/abs/2010.11929
"""
def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, depth=12,
num_heads=12, mlp_ratio=4., qkv_bias=True, qk_scale=None, representation_size=None,
drop_rate=0., attn_drop_rate=0., drop_path_rate=0., norm_layer=None,
use_grad_checkpointing=False, ckpt_layer=0):
"""
Args:
img_size (int, tuple): input image size
patch_size (int, tuple): patch size
in_chans (int): number of input channels
num_classes (int): number of classes for classification head
embed_dim (int): embedding dimension
depth (int): depth of transformer
num_heads (int): number of attention heads
mlp_ratio (int): ratio of mlp hidden dim to embedding dim
qkv_bias (bool): enable bias for qkv if True
qk_scale (float): override default qk scale of head_dim ** -0.5 if set
representation_size (Optional[int]): enable and set representation layer (pre-logits) to this value if set
drop_rate (float): dropout rate
attn_drop_rate (float): attention dropout rate
drop_path_rate (float): stochastic depth rate
norm_layer: (nn.Module): normalization layer
"""
super().__init__()
self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models
norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6)
self.patch_embed = PatchEmbed(
img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim)
num_patches = self.patch_embed.num_patches
self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim))
self.pos_drop = nn.Dropout(p=drop_rate)
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule
self.blocks = nn.ModuleList([
Block(
dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer,
use_grad_checkpointing=(use_grad_checkpointing and i>=depth-ckpt_layer)
)
for i in range(depth)])
self.norm = norm_layer(embed_dim)
trunc_normal_(self.pos_embed, std=.02)
trunc_normal_(self.cls_token, std=.02)
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
def no_weight_decay(self):
return {'pos_embed', 'cls_token'}
def forward(self, x, register_blk=-1):
B = x.shape[0]
x = self.patch_embed(x)
cls_tokens = self.cls_token.expand(B, -1, -1) # stole cls_tokens impl from Phil Wang, thanks
x = torch.cat((cls_tokens, x), dim=1)
x = x + self.pos_embed[:,:x.size(1),:]
x = self.pos_drop(x)
for i,blk in enumerate(self.blocks):
x = blk(x, register_blk==i)
x = self.norm(x)
return x
def load_pretrained(self, checkpoint_path, prefix=''):
_load_weights(self, checkpoint_path, prefix)
The provided code snippet includes necessary dependencies for implementing the `_load_weights` function. Write a Python function `def _load_weights(model: VisionTransformer, checkpoint_path: str, prefix: str = '')` to solve the following problem:
Load weights from .npz checkpoints for official Google Brain Flax implementation
Here is the function:
def _load_weights(model: VisionTransformer, checkpoint_path: str, prefix: str = ''):
""" Load weights from .npz checkpoints for official Google Brain Flax implementation
"""
import numpy as np
def _n2p(w, t=True):
if w.ndim == 4 and w.shape[0] == w.shape[1] == w.shape[2] == 1:
w = w.flatten()
if t:
if w.ndim == 4:
w = w.transpose([3, 2, 0, 1])
elif w.ndim == 3:
w = w.transpose([2, 0, 1])
elif w.ndim == 2:
w = w.transpose([1, 0])
return torch.from_numpy(w)
w = np.load(checkpoint_path)
if not prefix and 'opt/target/embedding/kernel' in w:
prefix = 'opt/target/'
if hasattr(model.patch_embed, 'backbone'):
# hybrid
backbone = model.patch_embed.backbone
stem_only = not hasattr(backbone, 'stem')
stem = backbone if stem_only else backbone.stem
stem.conv.weight.copy_(adapt_input_conv(stem.conv.weight.shape[1], _n2p(w[f'{prefix}conv_root/kernel'])))
stem.norm.weight.copy_(_n2p(w[f'{prefix}gn_root/scale']))
stem.norm.bias.copy_(_n2p(w[f'{prefix}gn_root/bias']))
if not stem_only:
for i, stage in enumerate(backbone.stages):
for j, block in enumerate(stage.blocks):
bp = f'{prefix}block{i + 1}/unit{j + 1}/'
for r in range(3):
getattr(block, f'conv{r + 1}').weight.copy_(_n2p(w[f'{bp}conv{r + 1}/kernel']))
getattr(block, f'norm{r + 1}').weight.copy_(_n2p(w[f'{bp}gn{r + 1}/scale']))
getattr(block, f'norm{r + 1}').bias.copy_(_n2p(w[f'{bp}gn{r + 1}/bias']))
if block.downsample is not None:
block.downsample.conv.weight.copy_(_n2p(w[f'{bp}conv_proj/kernel']))
block.downsample.norm.weight.copy_(_n2p(w[f'{bp}gn_proj/scale']))
block.downsample.norm.bias.copy_(_n2p(w[f'{bp}gn_proj/bias']))
embed_conv_w = _n2p(w[f'{prefix}embedding/kernel'])
else:
embed_conv_w = adapt_input_conv(
model.patch_embed.proj.weight.shape[1], _n2p(w[f'{prefix}embedding/kernel']))
model.patch_embed.proj.weight.copy_(embed_conv_w)
model.patch_embed.proj.bias.copy_(_n2p(w[f'{prefix}embedding/bias']))
model.cls_token.copy_(_n2p(w[f'{prefix}cls'], t=False))
pos_embed_w = _n2p(w[f'{prefix}Transformer/posembed_input/pos_embedding'], t=False)
if pos_embed_w.shape != model.pos_embed.shape:
pos_embed_w = resize_pos_embed( # resize pos embedding when different size from pretrained weights
pos_embed_w, model.pos_embed, getattr(model, 'num_tokens', 1), model.patch_embed.grid_size)
model.pos_embed.copy_(pos_embed_w)
model.norm.weight.copy_(_n2p(w[f'{prefix}Transformer/encoder_norm/scale']))
model.norm.bias.copy_(_n2p(w[f'{prefix}Transformer/encoder_norm/bias']))
for i, block in enumerate(model.blocks.children()):
block_prefix = f'{prefix}Transformer/encoderblock_{i}/'
mha_prefix = block_prefix + 'MultiHeadDotProductAttention_1/'
block.norm1.weight.copy_(_n2p(w[f'{block_prefix}LayerNorm_0/scale']))
block.norm1.bias.copy_(_n2p(w[f'{block_prefix}LayerNorm_0/bias']))
block.attn.qkv.weight.copy_(torch.cat([
_n2p(w[f'{mha_prefix}{n}/kernel'], t=False).flatten(1).T for n in ('query', 'key', 'value')]))
block.attn.qkv.bias.copy_(torch.cat([
_n2p(w[f'{mha_prefix}{n}/bias'], t=False).reshape(-1) for n in ('query', 'key', 'value')]))
block.attn.proj.weight.copy_(_n2p(w[f'{mha_prefix}out/kernel']).flatten(1))
block.attn.proj.bias.copy_(_n2p(w[f'{mha_prefix}out/bias']))
for r in range(2):
getattr(block.mlp, f'fc{r + 1}').weight.copy_(_n2p(w[f'{block_prefix}MlpBlock_3/Dense_{r}/kernel']))
getattr(block.mlp, f'fc{r + 1}').bias.copy_(_n2p(w[f'{block_prefix}MlpBlock_3/Dense_{r}/bias']))
block.norm2.weight.copy_(_n2p(w[f'{block_prefix}LayerNorm_2/scale']))
block.norm2.bias.copy_(_n2p(w[f'{block_prefix}LayerNorm_2/bias'])) | Load weights from .npz checkpoints for official Google Brain Flax implementation |
15,743 | from collections import abc
import os
import torch
from torch.nn import functional as F
from torch.autograd import Function
from torch.utils.cpp_extension import load
class UpFirDn2d(Function):
    @staticmethod
    def forward(ctx, input, kernel, up, down, pad):
up_x, up_y = up
down_x, down_y = down
pad_x0, pad_x1, pad_y0, pad_y1 = pad
kernel_h, kernel_w = kernel.shape
batch, channel, in_h, in_w = input.shape
ctx.in_size = input.shape
input = input.reshape(-1, in_h, in_w, 1)
ctx.save_for_backward(kernel, torch.flip(kernel, [0, 1]))
out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h + down_y) // down_y
out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w + down_x) // down_x
ctx.out_size = (out_h, out_w)
ctx.up = (up_x, up_y)
ctx.down = (down_x, down_y)
ctx.pad = (pad_x0, pad_x1, pad_y0, pad_y1)
g_pad_x0 = kernel_w - pad_x0 - 1
g_pad_y0 = kernel_h - pad_y0 - 1
g_pad_x1 = in_w * up_x - out_w * down_x + pad_x0 - up_x + 1
g_pad_y1 = in_h * up_y - out_h * down_y + pad_y0 - up_y + 1
ctx.g_pad = (g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1)
out = upfirdn2d_op.upfirdn2d(
input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1
)
# out = out.view(major, out_h, out_w, minor)
out = out.view(-1, channel, out_h, out_w)
return out
    @staticmethod
    def backward(ctx, grad_output):
kernel, grad_kernel = ctx.saved_tensors
grad_input = None
if ctx.needs_input_grad[0]:
grad_input = UpFirDn2dBackward.apply(
grad_output,
kernel,
grad_kernel,
ctx.up,
ctx.down,
ctx.pad,
ctx.g_pad,
ctx.in_size,
ctx.out_size,
)
return grad_input, None, None, None, None
def upfirdn2d_native(
input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1
):
_, channel, in_h, in_w = input.shape
input = input.reshape(-1, in_h, in_w, 1)
_, in_h, in_w, minor = input.shape
kernel_h, kernel_w = kernel.shape
out = input.view(-1, in_h, 1, in_w, 1, minor)
out = F.pad(out, [0, 0, 0, up_x - 1, 0, 0, 0, up_y - 1])
out = out.view(-1, in_h * up_y, in_w * up_x, minor)
out = F.pad(
out, [0, 0, max(pad_x0, 0), max(pad_x1, 0), max(pad_y0, 0), max(pad_y1, 0)]
)
out = out[
:,
max(-pad_y0, 0) : out.shape[1] - max(-pad_y1, 0),
max(-pad_x0, 0) : out.shape[2] - max(-pad_x1, 0),
:,
]
out = out.permute(0, 3, 1, 2)
out = out.reshape(
[-1, 1, in_h * up_y + pad_y0 + pad_y1, in_w * up_x + pad_x0 + pad_x1]
)
w = torch.flip(kernel, [0, 1]).view(1, 1, kernel_h, kernel_w)
out = F.conv2d(out, w)
out = out.reshape(
-1,
minor,
in_h * up_y + pad_y0 + pad_y1 - kernel_h + 1,
in_w * up_x + pad_x0 + pad_x1 - kernel_w + 1,
)
out = out.permute(0, 2, 3, 1)
out = out[:, ::down_y, ::down_x, :]
out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h + down_y) // down_y
out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w + down_x) // down_x
return out.view(-1, channel, out_h, out_w)
def upfirdn2d(input, kernel, up=1, down=1, pad=(0, 0)):
if not isinstance(up, abc.Iterable):
up = (up, up)
if not isinstance(down, abc.Iterable):
down = (down, down)
if len(pad) == 2:
pad = (pad[0], pad[1], pad[0], pad[1])
if input.device.type == "cpu":
out = upfirdn2d_native(input, kernel, *up, *down, *pad)
else:
out = UpFirDn2d.apply(input, kernel, up, down, pad)
return out | null |
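A CPU sketch that exercises the native fallback; the [1, 3, 3, 1] binomial kernel and the gain of 4 for 2x upsampling are the usual StyleGAN2 conventions, assumed here.
import torch

k = torch.tensor([1.0, 3.0, 3.0, 1.0])
k = k[None, :] * k[:, None]
k = k / k.sum()
x = torch.randn(1, 3, 16, 16)
y = upfirdn2d(x, k * 4, up=2, down=1, pad=(1, 2))  # gain 4 compensates 2x upsampling
assert y.shape[-2:] == (32, 32)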
15,744 | import contextlib
import warnings
import torch
from torch import autograd
from torch.nn import functional as F
weight_gradients_disabled = False
@contextlib.contextmanager
def no_weight_gradients():
global weight_gradients_disabled
old = weight_gradients_disabled
weight_gradients_disabled = True
yield
weight_gradients_disabled = old | null |
15,745 | import os
import torch
from torch import nn
from torch.nn import functional as F
from torch.autograd import Function
from torch.utils.cpp_extension import load
class FusedLeakyReLUFunction(Function):
    @staticmethod
    def forward(ctx, input, bias, negative_slope, scale):
empty = input.new_empty(0)
ctx.bias = bias is not None
if bias is None:
bias = empty
out = fused.fused_bias_act(input, bias, empty, 3, 0, negative_slope, scale)
ctx.save_for_backward(out)
ctx.negative_slope = negative_slope
ctx.scale = scale
return out
    @staticmethod
    def backward(ctx, grad_output):
out, = ctx.saved_tensors
grad_input, grad_bias = FusedLeakyReLUFunctionBackward.apply(
grad_output, out, ctx.bias, ctx.negative_slope, ctx.scale
)
if not ctx.bias:
grad_bias = None
return grad_input, grad_bias, None, None
def fused_leaky_relu(input, bias=None, negative_slope=0.2, scale=2 ** 0.5):
if input.device.type == "cpu":
if bias is not None:
rest_dim = [1] * (input.ndim - bias.ndim - 1)
            return (
                F.leaky_relu(
                    input + bias.view(1, bias.shape[0], *rest_dim),
                    negative_slope=negative_slope,  # honor the argument on the CPU path
                )
                * scale
            )
        else:
            return F.leaky_relu(input, negative_slope=negative_slope) * scale
else:
return FusedLeakyReLUFunction.apply(
input.contiguous(), bias, negative_slope, scale
) | null |
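Sketch of the CPU fallback path (assumed shapes): leaky ReLU on `input + bias`, scaled by sqrt(2).
import torch

x, bias = torch.randn(2, 8, 4, 4), torch.zeros(8)
y = fused_leaky_relu(x, bias)
assert y.shape == x.shape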
15,746 | import math
import random
import torch
from torch import nn
from torch.nn import functional as F
from .op import FusedLeakyReLU, fused_leaky_relu, upfirdn2d, conv2d_gradfix
def make_kernel(k):
k = torch.tensor(k, dtype=torch.float32)
if k.ndim == 1:
k = k[None, :] * k[:, None]
k /= k.sum()
return k | null |
15,747 | import copy
import os
import random
import urllib.request
import torch
import torch.nn.functional as FF
import torch.optim
from torchvision import utils
from tqdm import tqdm
from .stylegan2.model import Generator
def get_path(base_path):
BASE_DIR = os.path.join('model_zoo')
save_path = os.path.join(BASE_DIR, base_path)
if not os.path.exists(save_path):
url = f"https://huggingface.co/aaronb/StyleGAN2/resolve/main/{base_path}"
print(f'{base_path} not found')
print('Try to download from huggingface: ', url)
os.makedirs(os.path.dirname(save_path), exist_ok=True)
download_url(url, save_path)
print('Downloaded to ', save_path)
return save_path
class CustomGenerator(Generator):
def prepare(
self,
styles,
inject_index=None,
truncation=1,
truncation_latent=None,
input_is_latent=False,
noise=None,
randomize_noise=True,
):
if not input_is_latent:
styles = [self.style(s) for s in styles]
if noise is None:
if randomize_noise:
noise = [None] * self.num_layers
else:
noise = [
getattr(self.noises, f"noise_{i}") for i in range(self.num_layers)
]
if truncation < 1:
style_t = []
for style in styles:
style_t.append(
truncation_latent + truncation * (style - truncation_latent)
)
styles = style_t
if len(styles) < 2:
inject_index = self.n_latent
if styles[0].ndim < 3:
latent = styles[0].unsqueeze(1).repeat(1, inject_index, 1)
else:
latent = styles[0]
else:
if inject_index is None:
inject_index = random.randint(1, self.n_latent - 1)
latent = styles[0].unsqueeze(1).repeat(1, inject_index, 1)
latent2 = styles[1].unsqueeze(1).repeat(1, self.n_latent - inject_index, 1)
latent = torch.cat([latent, latent2], 1)
# latent = latent.to('cpu')
# for i in range(len(noise)):
# if isinstance(noise[i], torch.Tensor):
# noise[i] = noise[i].to('cpu')
# torch.cuda.empty_cache()
# torch.cuda.ipc_collect()
return latent, noise
def generate(
self,
latent,
noise,
):
out = self.input(latent)
out = self.conv1(out, latent[:, 0], noise=noise[0])
skip = self.to_rgb1(out, latent[:, 1])
i = 1
for conv1, conv2, noise1, noise2, to_rgb in zip(
self.convs[::2], self.convs[1::2], noise[1::2], noise[2::2], self.to_rgbs
):
out = conv1(out, latent[:, i], noise=noise1)
out = conv2(out, latent[:, i + 1], noise=noise2)
skip = to_rgb(out, latent[:, i + 2], skip)
if out.shape[-1] == 256: F = out
i += 2
image = skip
F = FF.interpolate(F, image.shape[-2:], mode='bilinear')
return image, F
def stylegan2(
size=1024,
channel_multiplier=2,
latent=512,
n_mlp=8,
ckpt='stylegan2-ffhq-config-f.pt'
):
g_ema = CustomGenerator(size, latent, n_mlp, channel_multiplier=channel_multiplier)
checkpoint = torch.load(get_path(ckpt))
g_ema.load_state_dict(checkpoint["g_ema"], strict=False)
g_ema.requires_grad_(False)
g_ema.eval()
return g_ema | null |
15,748 | import copy
import os
import random
import urllib.request
import torch
import torch.nn.functional as FF
import torch.optim
from torchvision import utils
from tqdm import tqdm
from .stylegan2.model import Generator
def bilinear_interpolate_torch(im, y, x):
    # Body elided in the snippet; reconstructed as the standard bilinear
    # sampling helper: sample feature map `im` (..., H, W) at fractional (y, x).
    x0 = torch.floor(x).long(); x1 = x0 + 1
    y0 = torch.floor(y).long(); y1 = y0 + 1
    wa = (x1.float() - x) * (y1.float() - y)
    wb = (x1.float() - x) * (y - y0.float())
    wc = (x - x0.float()) * (y1.float() - y)
    wd = (x - x0.float()) * (y - y0.float())
    return (im[..., y0, x0] * wa + im[..., y1, x0] * wb
            + im[..., y0, x1] * wc + im[..., y1, x1] * wd)
def drag_gan(g_ema, latent: torch.Tensor, noise, F, handle_points, target_points, mask=None, device='cpu', max_iters=1000):
handle_points0 = copy.deepcopy(handle_points)
n = len(handle_points)
r1, r2, lam, d = 3, 12, 20, 1
def neighbor(x, y, d):
points = []
for i in range(x - d, x + d):
for j in range(y - d, y + d):
points.append(torch.tensor([i, j]).float().to(device))
return points
F0 = F.detach().clone()
latent_trainable = latent[:, :6, :].detach().clone().requires_grad_(True)
latent_untrainable = latent[:, 6:, :].detach().clone().requires_grad_(False)
optimizer = torch.optim.Adam([latent_trainable], lr=2e-3)
for iter in range(max_iters):
for s in range(1):
optimizer.zero_grad()
latent = torch.cat([latent_trainable, latent_untrainable], dim=1)
sample2, F2 = g_ema.generate(latent, noise)
# motion supervision
loss = 0
for i in range(n):
pi, ti = handle_points[i], target_points[i]
di = (ti - pi) / torch.sum((ti - pi)**2)
for qi in neighbor(int(pi[0]), int(pi[1]), r1):
# f1 = F[..., int(qi[0]), int(qi[1])]
# f2 = F2[..., int(qi[0] + di[0]), int(qi[1] + di[1])]
f1 = bilinear_interpolate_torch(F2, qi[0], qi[1]).detach()
f2 = bilinear_interpolate_torch(F2, qi[0] + di[0], qi[1] + di[1])
loss += FF.l1_loss(f2, f1)
if mask is not None:
loss += ((F2 - F0) * (1 - mask)).abs().mean() * lam
loss.backward()
optimizer.step()
# point tracking
with torch.no_grad():
sample2, F2 = g_ema.generate(latent, noise)
for i in range(n):
pi = handle_points0[i]
# f = F0[..., int(pi[0]), int(pi[1])]
f0 = bilinear_interpolate_torch(F0, pi[0], pi[1])
minv = 1e9
minx = 1e9
miny = 1e9
                for qi in neighbor(int(handle_points[i][0]), int(handle_points[i][1]), r2):
                    # f2 = F2[..., int(qi[0]), int(qi[1])]
                    try:
                        f2 = bilinear_interpolate_torch(F2, qi[0], qi[1])
                    except IndexError:
                        continue  # neighbor fell outside the feature map; skip it
v = torch.norm(f2 - f0, p=1)
if v < minv:
minv = v
minx = int(qi[0])
miny = int(qi[1])
handle_points[i][0] = minx
handle_points[i][1] = miny
F2 = F2.cpu()
yield sample2, latent, F2, handle_points | null |
15,749 | import os
import sys
import cv2
import numpy as np
import torch
import ipdb
from PIL import Image
from .utils import gen_new_name, prompts
import torch
from omegaconf import OmegaConf
import numpy as np
import wget
from .inpainting_src.ldm_inpainting.ldm.models.diffusion.ddim import DDIMSampler
from .inpainting_src.ldm_inpainting.ldm.util import instantiate_from_config
from .utils import cal_dilate_factor, dilate_mask
def make_batch(image, mask, device):
image = image.astype(np.float32) / 255.0
image = image[None].transpose(0, 3, 1, 2)
image = torch.from_numpy(image)
mask = mask.astype(np.float32) / 255.0
mask = mask[None, None]
mask[mask < 0.5] = 0
mask[mask >= 0.5] = 1
mask = torch.from_numpy(mask)
masked_image = (1 - mask) * image
batch = {"image": image, "mask": mask, "masked_image": masked_image}
for k in batch:
batch[k] = batch[k].to(device=device)
batch[k] = batch[k] * 2.0 - 1.0
return batch | null |
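Sketch with assumed uint8 inputs; all three tensors come back on the device, scaled to [-1, 1].
import numpy as np

image = np.zeros((512, 512, 3), dtype=np.uint8)
mask = np.zeros((512, 512), dtype=np.uint8)
batch = make_batch(image, mask, device='cpu')
assert batch['image'].shape == (1, 3, 512, 512)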
15,750 | import numpy as np
from scipy import interpolate
import torch
import torch.nn as nn
import torch.utils.checkpoint as checkpoint
from timm.models.layers import DropPath, to_2tuple, trunc_normal_
The provided code snippet includes necessary dependencies for implementing the `window_partition` function. Write a Python function `def window_partition(x, window_size)` to solve the following problem:
Args: x: (B, H, W, C) window_size (int): window size Returns: windows: (num_windows*B, window_size, window_size, C)
Here is the function:
def window_partition(x, window_size):
"""
Args:
x: (B, H, W, C)
window_size (int): window size
Returns:
windows: (num_windows*B, window_size, window_size, C)
"""
B, H, W, C = x.shape
x = x.view(B, H // window_size, window_size, W // window_size, window_size, C)
windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)
return windows | Args: x: (B, H, W, C) window_size (int): window size Returns: windows: (num_windows*B, window_size, window_size, C) |
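A shape sketch (H and W assumed divisible by window_size):
import torch

x = torch.randn(2, 56, 56, 96)
windows = window_partition(x, window_size=7)
assert windows.shape == (2 * 8 * 8, 7, 7, 96)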