id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
153,384 | from __future__ import print_function
import argparse
import os
import shutil
import time
import random
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.utils.data as data
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models
import models.imagenet as customized_models
from utils import Bar, Logger, AverageMeter, accuracy, mkdir_p, savefig
from utils.radam import RAdam, AdamW
# Parse CLI options (``parser`` is defined earlier in the full script —
# TODO confirm against the complete file) and snapshot them into a mutable
# ``state`` dict used for logging and LR scheduling.
args = parser.parse_args()
state = {k: v for k, v in args._get_kwargs()}
# Fall back to a random seed so the run is still reproducible once logged.
if args.manualSeed is None:
    args.manualSeed = random.randint(1, 10000)
def adjust_learning_rate(optimizer, epoch):
    """Decay the tracked learning rate by ``args.gamma`` at scheduled epochs.

    Mutates the module-level ``state['lr']`` and pushes the new value into
    every parameter group of *optimizer*.
    """
    global state
    if epoch not in args.schedule:
        return
    state['lr'] *= args.gamma
    for group in optimizer.param_groups:
        group['lr'] = state['lr']
153,385 | from __future__ import print_function
import argparse
import os
import shutil
import time
import random
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.utils.data as data
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import models.cifar as models
from utils import Bar, Logger, AverageMeter, accuracy, mkdir_p, savefig
from utils.radam import RAdam, RAdam_4step, AdamW
# Seed the CPU RNG so weight init and shuffling are reproducible across runs.
torch.manual_seed(args.manualSeed)
def train(trainloader, model, criterion, optimizer, epoch, use_cuda):
    """Run one training epoch and return ``(avg_loss, avg_top1)``.

    Args:
        trainloader: iterable of ``(inputs, targets)`` mini-batches.
        model: network to optimize (switched to ``train()`` mode here).
        criterion: loss function applied to the model outputs.
        optimizer: optimizer whose ``step()`` is called once per batch.
        epoch: current epoch index (unused here; kept for a uniform API).
        use_cuda: move batches to the GPU when True.
    """
    # switch to train mode (enables dropout / batch-norm updates)
    model.train()

    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()

    end = time.time()
    bar = Bar('Processing', max=len(trainloader))
    for batch_idx, (inputs, targets) in enumerate(trainloader):
        # measure data loading time
        data_time.update(time.time() - end)

        if use_cuda:
            inputs, targets = inputs.cuda(), targets.cuda(non_blocking=True)
        # NOTE: the legacy ``torch.autograd.Variable`` wrap was removed; it is
        # a no-op since PyTorch 0.4, which this code already requires
        # (``loss.item()`` / ``non_blocking=True``).

        # compute output
        outputs = model(inputs)
        loss = criterion(outputs, targets)

        # measure accuracy and record loss
        prec1, prec5 = accuracy(outputs.data, targets.data, topk=(1, 5))
        losses.update(loss.item(), inputs.size(0))
        top1.update(prec1.item(), inputs.size(0))
        top5.update(prec5.item(), inputs.size(0))

        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        # plot progress
        bar.suffix = '({batch}/{size}) Data: {data:.3f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} | top1: {top1: .4f} | top5: {top5: .4f}'.format(
            batch=batch_idx + 1,
            size=len(trainloader),
            data=data_time.avg,
            bt=batch_time.avg,
            total=bar.elapsed_td,
            eta=bar.eta_td,
            loss=losses.avg,
            top1=top1.avg,
            top5=top5.avg,
        )
        bar.next()
    bar.finish()
    return (losses.avg, top1.avg)
153,386 | from __future__ import print_function
import argparse
import os
import shutil
import time
import random
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.utils.data as data
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import models.cifar as models
from utils import Bar, Logger, AverageMeter, accuracy, mkdir_p, savefig
from utils.radam import RAdam, RAdam_4step, AdamW
# Restrict the visible GPUs before any CUDA context is created, then seed the
# CPU RNG for reproducibility.
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id
torch.manual_seed(args.manualSeed)
def save_checkpoint(state, is_best, checkpoint='checkpoint', filename='checkpoint.pth.tar'):
    """Serialize *state* under ``checkpoint`` and mirror the best model.

    The dict is always written to ``checkpoint/filename``; when *is_best* is
    true it is additionally copied to ``checkpoint/model_best.pth.tar``.
    """
    target = os.path.join(checkpoint, filename)
    torch.save(state, target)
    if not is_best:
        return
    shutil.copyfile(target, os.path.join(checkpoint, 'model_best.pth.tar'))
153,387 | from __future__ import print_function
import argparse
import os
import shutil
import time
import random
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.utils.data as data
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import models.cifar as models
from utils import Bar, Logger, AverageMeter, accuracy, mkdir_p, savefig
from utils.radam import RAdam, RAdam_4step, AdamW
# Parse CLI options (``parser`` is defined earlier in the full script —
# TODO confirm against the complete file) and snapshot them into a mutable
# ``state`` dict used for LR scheduling.
args = parser.parse_args()
state = {k: v for k, v in args._get_kwargs()}
# This training script only supports the two CIFAR variants.
assert args.dataset == 'cifar10' or args.dataset == 'cifar100', 'Dataset can only be cifar10 or cifar100.'
# Fall back to a random seed so the run is still reproducible once logged.
if args.manualSeed is None:
    args.manualSeed = random.randint(1, 10000)
def adjust_learning_rate(optimizer, epoch):
    """Multiply the tracked LR by ``args.gamma`` when *epoch* is scheduled.

    Updates the module-level ``state['lr']`` and applies the decayed value to
    each of *optimizer*'s parameter groups.
    """
    global state
    if epoch not in args.schedule:
        return
    state['lr'] *= args.gamma
    for group in optimizer.param_groups:
        group['lr'] = state['lr']
153,388 | import torch.nn as nn
class AlexNet(nn.Module):
    """CIFAR-sized AlexNet: five conv stages followed by a linear classifier.

    For 32x32 RGB inputs the conv stack reduces each sample to a 256-dim
    feature vector, which ``classifier`` maps to ``num_classes`` logits.
    """

    def __init__(self, num_classes=10):
        super(AlexNet, self).__init__()
        stages = [
            nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=5),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
            nn.Conv2d(64, 192, kernel_size=5, padding=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
            nn.Conv2d(192, 384, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(384, 256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(256, 256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
        ]
        self.features = nn.Sequential(*stages)
        self.classifier = nn.Linear(256, num_classes)

    def forward(self, x):
        feats = self.features(x)
        flat = feats.view(feats.size(0), -1)
        return self.classifier(flat)
The provided code snippet includes necessary dependencies for implementing the `alexnet` function. Write a Python function `def alexnet(**kwargs)` to solve the following problem:
r"""AlexNet model architecture from the `"One weird trick..." <https://arxiv.org/abs/1404.5997>`_ paper.
Here is the function:
def alexnet(**kwargs):
    r"""AlexNet model architecture from the
    `"One weird trick..." <https://arxiv.org/abs/1404.5997>`_ paper.
    """
    return AlexNet(**kwargs)
153,389 | import torch
import torch.nn as nn
import torch.nn.functional as F
import math
from torch.autograd import Variable
class DenseNet(nn.Module):
    """DenseNet for CIFAR: three dense blocks joined by transition layers.

    Args:
        depth: total layer count; must satisfy ``(depth - 4) % 3 == 0``.
        block: dense-layer type (``BasicBlock`` or ``Bottleneck``, defined
            elsewhere in this package).
        dropRate: dropout rate forwarded to every dense layer.
        num_classes: size of the final classifier.
        growthRate: feature maps added by each dense layer.
        compressionRate: channel reduction factor in transition layers.
    """

    def __init__(self, depth=22, block=Bottleneck,
                 dropRate=0, num_classes=10, growthRate=12, compressionRate=2):
        super(DenseNet, self).__init__()

        assert (depth - 4) % 3 == 0, 'depth should be 3n+4'
        # BUGFIX: use floor division in both branches. ``(depth - 4) / 3``
        # yields a float under Python 3 and later breaks ``range(blocks)``
        # inside ``_make_denseblock``.
        n = (depth - 4) // 3 if block == BasicBlock else (depth - 4) // 6

        self.growthRate = growthRate
        self.dropRate = dropRate

        # self.inplanes tracks the running channel count across the
        # block-building helper methods below.
        self.inplanes = growthRate * 2
        self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=3, padding=1,
                               bias=False)
        self.dense1 = self._make_denseblock(block, n)
        self.trans1 = self._make_transition(compressionRate)
        self.dense2 = self._make_denseblock(block, n)
        self.trans2 = self._make_transition(compressionRate)
        self.dense3 = self._make_denseblock(block, n)
        self.bn = nn.BatchNorm2d(self.inplanes)
        self.relu = nn.ReLU(inplace=True)
        self.avgpool = nn.AvgPool2d(8)
        self.fc = nn.Linear(self.inplanes, num_classes)

        # He-style initialization for convs, constants for batch-norm.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def _make_denseblock(self, block, blocks):
        """Stack ``blocks`` dense layers, growing ``self.inplanes`` each time."""
        layers = []
        for i in range(blocks):
            # Currently we fix the expansion ratio as the default value
            layers.append(block(self.inplanes, growthRate=self.growthRate, dropRate=self.dropRate))
            self.inplanes += self.growthRate
        return nn.Sequential(*layers)

    def _make_transition(self, compressionRate):
        """Compress channels by ``compressionRate`` between dense blocks."""
        inplanes = self.inplanes
        # integer // already floors, so the old int(math.floor(...)) wrapper
        # was redundant
        outplanes = self.inplanes // compressionRate
        self.inplanes = outplanes
        return Transition(inplanes, outplanes)

    def forward(self, x):
        x = self.conv1(x)
        x = self.trans1(self.dense1(x))
        x = self.trans2(self.dense2(x))
        x = self.dense3(x)
        x = self.bn(x)
        x = self.relu(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x)
        return x
The provided code snippet includes necessary dependencies for implementing the `densenet` function. Write a Python function `def densenet(**kwargs)` to solve the following problem:
Constructs a DenseNet model.
Here is the function:
def densenet(**kwargs):
    """Construct a DenseNet model (see :class:`DenseNet` for the options)."""
    model = DenseNet(**kwargs)
    return model
153,390 | from __future__ import division
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import init
class CifarResNeXt(nn.Module):
    """
    ResNext optimized for the Cifar dataset, as specified in
    https://arxiv.org/pdf/1611.05431.pdf
    """

    def __init__(self, cardinality, depth, num_classes, widen_factor=4, dropRate=0):
        """ Constructor
        Args:
            cardinality: number of convolution groups.
            depth: number of layers; ``depth - 2`` must be divisible by 9.
            num_classes: number of classes
            widen_factor: factor to adjust the channel dimensionality
            dropRate: accepted for API compatibility (not used in this class).
        """
        super(CifarResNeXt, self).__init__()
        self.cardinality = cardinality
        self.depth = depth
        self.block_depth = (self.depth - 2) // 9
        self.widen_factor = widen_factor
        self.num_classes = num_classes
        self.output_size = 64
        self.stages = [64, 64 * self.widen_factor, 128 * self.widen_factor, 256 * self.widen_factor]

        self.conv_1_3x3 = nn.Conv2d(3, 64, 3, 1, 1, bias=False)
        self.bn_1 = nn.BatchNorm2d(64)
        self.stage_1 = self.block('stage_1', self.stages[0], self.stages[1], 1)
        self.stage_2 = self.block('stage_2', self.stages[1], self.stages[2], 2)
        self.stage_3 = self.block('stage_3', self.stages[2], self.stages[3], 2)
        # GENERALIZED: use the computed final stage width instead of the
        # hard-coded 1024, which was only correct for widen_factor=4.
        self.classifier = nn.Linear(self.stages[3], num_classes)
        # ``init.kaiming_normal`` is the deprecated alias; use the in-place
        # ``kaiming_normal_`` expected by current PyTorch.
        init.kaiming_normal_(self.classifier.weight)

        # Blanket re-initialization driven by parameter names: He init for
        # convs, weight=1 for batch-norms, zero for all biases.
        for key in self.state_dict():
            if key.split('.')[-1] == 'weight':
                if 'conv' in key:
                    init.kaiming_normal_(self.state_dict()[key], mode='fan_out')
                if 'bn' in key:
                    self.state_dict()[key][...] = 1
            elif key.split('.')[-1] == 'bias':
                self.state_dict()[key][...] = 0

    def block(self, name, in_channels, out_channels, pool_stride=2):
        """ Stack n bottleneck modules where n is inferred from the depth of the network.
        Args:
            name: string name of the current block.
            in_channels: number of input channels
            out_channels: number of output channels
            pool_stride: factor to reduce the spatial dimensionality in the first bottleneck of the block.
        Returns: a Module consisting of n sequential bottlenecks.
        """
        block = nn.Sequential()
        for bottleneck in range(self.block_depth):
            name_ = '%s_bottleneck_%d' % (name, bottleneck)
            if bottleneck == 0:
                # Only the first bottleneck of a stage changes channel count
                # and spatial resolution.
                block.add_module(name_, ResNeXtBottleneck(in_channels, out_channels, pool_stride, self.cardinality,
                                                          self.widen_factor))
            else:
                block.add_module(name_,
                                 ResNeXtBottleneck(out_channels, out_channels, 1, self.cardinality, self.widen_factor))
        return block

    def forward(self, x):
        x = self.conv_1_3x3.forward(x)
        x = F.relu(self.bn_1.forward(x), inplace=True)
        x = self.stage_1.forward(x)
        x = self.stage_2.forward(x)
        x = self.stage_3.forward(x)
        x = F.avg_pool2d(x, 8, 1)
        # Flatten to the final stage width (was hard-coded to 1024).
        x = x.view(-1, self.stages[3])
        return self.classifier(x)
The provided code snippet includes necessary dependencies for implementing the `resnext` function. Write a Python function `def resnext(**kwargs)` to solve the following problem:
Constructs a ResNeXt.
Here is the function:
def resnext(**kwargs):
    """Construct a CIFAR ResNeXt model."""
    return CifarResNeXt(**kwargs)
153,391 | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class WideResNet(nn.Module):
    """Wide Residual Network for 32x32 inputs (Zagoruyko & Komodakis).

    ``depth`` must satisfy ``(depth - 4) % 6 == 0``; ``widen_factor`` scales
    the channel counts of the three residual groups.
    """

    def __init__(self, depth, num_classes, widen_factor=1, dropRate=0.0):
        super(WideResNet, self).__init__()
        widths = [16, 16 * widen_factor, 32 * widen_factor, 64 * widen_factor]
        assert (depth - 4) % 6 == 0, 'depth should be 6n+4'
        n = (depth - 4) // 6
        block = BasicBlock
        # Stem convolution before the residual groups.
        self.conv1 = nn.Conv2d(3, widths[0], kernel_size=3, stride=1,
                               padding=1, bias=False)
        # Three residual groups; the last two halve the spatial size.
        self.block1 = NetworkBlock(n, widths[0], widths[1], block, 1, dropRate)
        self.block2 = NetworkBlock(n, widths[1], widths[2], block, 2, dropRate)
        self.block3 = NetworkBlock(n, widths[2], widths[3], block, 2, dropRate)
        # Global average pooling and classifier head.
        self.bn1 = nn.BatchNorm2d(widths[3])
        self.relu = nn.ReLU(inplace=True)
        self.fc = nn.Linear(widths[3], num_classes)
        self.nChannels = widths[3]

        # He init for convs, constants for batch-norm, zero linear biases.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / fan_out))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.bias.data.zero_()

    def forward(self, x):
        out = self.conv1(x)
        out = self.block1(out)
        out = self.block2(out)
        out = self.block3(out)
        out = self.relu(self.bn1(out))
        out = F.avg_pool2d(out, 8)
        out = out.view(-1, self.nChannels)
        return self.fc(out)
The provided code snippet includes necessary dependencies for implementing the `wrn` function. Write a Python function `def wrn(**kwargs)` to solve the following problem:
Constructs a Wide Residual Networks.
Here is the function:
def wrn(**kwargs):
    """Construct a Wide Residual Network."""
    return WideResNet(**kwargs)
153,392 | from __future__ import absolute_import
import torch.nn as nn
import math
The provided code snippet includes necessary dependencies for implementing the `conv3x3` function. Write a Python function `def conv3x3(in_planes, out_planes, stride=1)` to solve the following problem:
3x3 convolution with padding
Here is the function:
def conv3x3(in_planes, out_planes, stride=1):
    """Return a bias-free 3x3 convolution with padding 1."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=False,
    )
153,393 | from __future__ import absolute_import
import torch.nn as nn
import math
class PreResNet(nn.Module):
    """Pre-activation ResNet for CIFAR-sized 32x32 inputs.

    Args:
        depth: total layer count; 6n+2 for BasicBlock, 9n+2 for Bottleneck.
        num_classes: size of the final classifier.
        block_name: 'BasicBlock' or 'Bottleneck' (case-insensitive).

    Raises:
        ValueError: when ``block_name`` names neither supported block type.
    """

    def __init__(self, depth, num_classes=1000, block_name='BasicBlock'):
        super(PreResNet, self).__init__()
        # Model type specifies number of layers for CIFAR-10 model
        if block_name.lower() == 'basicblock':
            assert (depth - 2) % 6 == 0, 'When use basicblock, depth should be 6n+2, e.g. 20, 32, 44, 56, 110, 1202'
            n = (depth - 2) // 6
            block = BasicBlock
        elif block_name.lower() == 'bottleneck':
            assert (depth - 2) % 9 == 0, 'When use bottleneck, depth should be 9n+2, e.g. 20, 29, 47, 56, 110, 1199'
            n = (depth - 2) // 9
            block = Bottleneck
        else:
            # BUGFIX: corrected the typo 'shoule' in the error message.
            raise ValueError('block_name should be Basicblock or Bottleneck')

        self.inplanes = 16
        self.conv1 = nn.Conv2d(3, 16, kernel_size=3, padding=1,
                               bias=False)
        self.layer1 = self._make_layer(block, 16, n)
        self.layer2 = self._make_layer(block, 32, n, stride=2)
        self.layer3 = self._make_layer(block, 64, n, stride=2)
        # Pre-activation nets apply the final BN/ReLU after the last block.
        self.bn = nn.BatchNorm2d(64 * block.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.avgpool = nn.AvgPool2d(8)
        self.fc = nn.Linear(64 * block.expansion, num_classes)

        # He init for convs, constants for batch-norm.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def _make_layer(self, block, planes, blocks, stride=1):
        """Build one residual stage of ``blocks`` blocks at ``planes`` width."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            # 1x1 projection (no BN: pre-activation blocks normalize inside).
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
            )

        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))

        return nn.Sequential(*layers)

    def forward(self, x):
        x = self.conv1(x)

        x = self.layer1(x)  # 32x32
        x = self.layer2(x)  # 16x16
        x = self.layer3(x)  # 8x8
        x = self.bn(x)
        x = self.relu(x)

        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x)

        return x
The provided code snippet includes necessary dependencies for implementing the `preresnet` function. Write a Python function `def preresnet(**kwargs)` to solve the following problem:
Constructs a ResNet model.
Here is the function:
def preresnet(**kwargs):
    """Construct a pre-activation ResNet model."""
    model = PreResNet(**kwargs)
    return model
153,395 | from __future__ import absolute_import
import torch.nn as nn
import math
class ResNet(nn.Module):
    """Classic post-activation ResNet for CIFAR-sized 32x32 inputs.

    Args:
        depth: total layer count; 6n+2 for BasicBlock, 9n+2 for Bottleneck.
        num_classes: size of the final classifier.
        block_name: 'BasicBlock' or 'Bottleneck' (case-insensitive).

    Raises:
        ValueError: when ``block_name`` names neither supported block type.
    """

    def __init__(self, depth, num_classes=1000, block_name='BasicBlock'):
        super(ResNet, self).__init__()
        # Model type specifies number of layers for CIFAR-10 model
        if block_name.lower() == 'basicblock':
            assert (depth - 2) % 6 == 0, 'When use basicblock, depth should be 6n+2, e.g. 20, 32, 44, 56, 110, 1202'
            n = (depth - 2) // 6
            block = BasicBlock
        elif block_name.lower() == 'bottleneck':
            assert (depth - 2) % 9 == 0, 'When use bottleneck, depth should be 9n+2, e.g. 20, 29, 47, 56, 110, 1199'
            n = (depth - 2) // 9
            block = Bottleneck
        else:
            # BUGFIX: corrected the typo 'shoule' in the error message.
            raise ValueError('block_name should be Basicblock or Bottleneck')

        self.inplanes = 16
        self.conv1 = nn.Conv2d(3, 16, kernel_size=3, padding=1,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(16)
        self.relu = nn.ReLU(inplace=True)
        self.layer1 = self._make_layer(block, 16, n)
        self.layer2 = self._make_layer(block, 32, n, stride=2)
        self.layer3 = self._make_layer(block, 64, n, stride=2)
        self.avgpool = nn.AvgPool2d(8)
        self.fc = nn.Linear(64 * block.expansion, num_classes)

        # He init for convs, constants for batch-norm.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def _make_layer(self, block, planes, blocks, stride=1):
        """Build one residual stage of ``blocks`` blocks at ``planes`` width."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            # 1x1 projection + BN matches the main path's output statistics.
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )

        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))

        return nn.Sequential(*layers)

    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)  # 32x32

        x = self.layer1(x)  # 32x32
        x = self.layer2(x)  # 16x16
        x = self.layer3(x)  # 8x8

        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x)

        return x
The provided code snippet includes necessary dependencies for implementing the `resnet` function. Write a Python function `def resnet(**kwargs)` to solve the following problem:
Constructs a ResNet model.
Here is the function:
def resnet(**kwargs):
    """Construct a CIFAR ResNet model."""
    model = ResNet(**kwargs)
    return model
153,396 | import torch.nn as nn
import torch.utils.model_zoo as model_zoo
import math
class VGG(nn.Module):
    """VGG for CIFAR-sized inputs: conv ``features`` + one linear classifier.

    The classifier expects the flattened feature map to have 512 elements per
    sample (the standard VGG configs end in 512 channels at 1x1).
    """

    def __init__(self, features, num_classes=1000):
        super(VGG, self).__init__()
        self.features = features
        self.classifier = nn.Linear(512, num_classes)
        self._initialize_weights()

    def forward(self, x):
        out = self.features(x)
        out = out.view(out.size(0), -1)
        return self.classifier(out)

    def _initialize_weights(self):
        """He init for convs, constants for BN, small-normal for linears."""
        for mod in self.modules():
            if isinstance(mod, nn.Conv2d):
                fan_out = mod.kernel_size[0] * mod.kernel_size[1] * mod.out_channels
                mod.weight.data.normal_(0, math.sqrt(2. / fan_out))
                if mod.bias is not None:
                    mod.bias.data.zero_()
            elif isinstance(mod, nn.BatchNorm2d):
                mod.weight.data.fill_(1)
                mod.bias.data.zero_()
            elif isinstance(mod, nn.Linear):
                mod.weight.data.normal_(0, 0.01)
                mod.bias.data.zero_()
def make_layers(cfg, batch_norm=False):
    """Translate a VGG config list into an ``nn.Sequential`` feature stack.

    Each integer entry adds a 3x3 conv (plus optional BatchNorm) followed by
    ReLU; the sentinel ``'M'`` inserts a 2x2 max-pool.
    """
    layers = []
    channels = 3
    for item in cfg:
        if item == 'M':
            layers.append(nn.MaxPool2d(kernel_size=2, stride=2))
            continue
        layers.append(nn.Conv2d(channels, item, kernel_size=3, padding=1))
        if batch_norm:
            layers.append(nn.BatchNorm2d(item))
        layers.append(nn.ReLU(inplace=True))
        channels = item
    return nn.Sequential(*layers)
# Standard VGG layer configurations (numbers = conv output channels,
# 'M' = 2x2 max-pool): A=VGG11, B=VGG13, D=VGG16, E=VGG19.
cfg = {
    'A': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
    'B': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
    'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
    'E': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],
}
The provided code snippet includes necessary dependencies for implementing the `vgg11` function. Write a Python function `def vgg11(**kwargs)` to solve the following problem:
VGG 11-layer model (configuration "A") Args: pretrained (bool): If True, returns a model pre-trained on ImageNet
Here is the function:
def vgg11(**kwargs):
    """VGG 11-layer model (configuration "A")."""
    return VGG(make_layers(cfg['A']), **kwargs)
153,397 | import torch.nn as nn
import torch.utils.model_zoo as model_zoo
import math
class VGG(nn.Module):
    """VGG for CIFAR-sized inputs: conv ``features`` + one linear classifier.

    The classifier expects the flattened feature map to have 512 elements per
    sample (the standard VGG configs end in 512 channels at 1x1).
    """

    def __init__(self, features, num_classes=1000):
        super(VGG, self).__init__()
        self.features = features
        self.classifier = nn.Linear(512, num_classes)
        self._initialize_weights()

    def forward(self, x):
        out = self.features(x)
        out = out.view(out.size(0), -1)
        return self.classifier(out)

    def _initialize_weights(self):
        """He init for convs, constants for BN, small-normal for linears."""
        for mod in self.modules():
            if isinstance(mod, nn.Conv2d):
                fan_out = mod.kernel_size[0] * mod.kernel_size[1] * mod.out_channels
                mod.weight.data.normal_(0, math.sqrt(2. / fan_out))
                if mod.bias is not None:
                    mod.bias.data.zero_()
            elif isinstance(mod, nn.BatchNorm2d):
                mod.weight.data.fill_(1)
                mod.bias.data.zero_()
            elif isinstance(mod, nn.Linear):
                mod.weight.data.normal_(0, 0.01)
                mod.bias.data.zero_()
def make_layers(cfg, batch_norm=False):
    """Translate a VGG config list into an ``nn.Sequential`` feature stack.

    Each integer entry adds a 3x3 conv (plus optional BatchNorm) followed by
    ReLU; the sentinel ``'M'`` inserts a 2x2 max-pool.
    """
    layers = []
    channels = 3
    for item in cfg:
        if item == 'M':
            layers.append(nn.MaxPool2d(kernel_size=2, stride=2))
            continue
        layers.append(nn.Conv2d(channels, item, kernel_size=3, padding=1))
        if batch_norm:
            layers.append(nn.BatchNorm2d(item))
        layers.append(nn.ReLU(inplace=True))
        channels = item
    return nn.Sequential(*layers)
# Standard VGG layer configurations (numbers = conv output channels,
# 'M' = 2x2 max-pool): A=VGG11, B=VGG13, D=VGG16, E=VGG19.
cfg = {
    'A': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
    'B': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
    'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
    'E': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],
}
The provided code snippet includes necessary dependencies for implementing the `vgg11_bn` function. Write a Python function `def vgg11_bn(**kwargs)` to solve the following problem:
VGG 11-layer model (configuration "A") with batch normalization
Here is the function:
def vgg11_bn(**kwargs):
    """VGG 11-layer model (configuration "A") with batch normalization"""
    return VGG(make_layers(cfg['A'], batch_norm=True), **kwargs)
153,398 | import torch.nn as nn
import torch.utils.model_zoo as model_zoo
import math
class VGG(nn.Module):
    """VGG for CIFAR-sized inputs: conv ``features`` + one linear classifier.

    The classifier expects the flattened feature map to have 512 elements per
    sample (the standard VGG configs end in 512 channels at 1x1).
    """

    def __init__(self, features, num_classes=1000):
        super(VGG, self).__init__()
        self.features = features
        self.classifier = nn.Linear(512, num_classes)
        self._initialize_weights()

    def forward(self, x):
        out = self.features(x)
        out = out.view(out.size(0), -1)
        return self.classifier(out)

    def _initialize_weights(self):
        """He init for convs, constants for BN, small-normal for linears."""
        for mod in self.modules():
            if isinstance(mod, nn.Conv2d):
                fan_out = mod.kernel_size[0] * mod.kernel_size[1] * mod.out_channels
                mod.weight.data.normal_(0, math.sqrt(2. / fan_out))
                if mod.bias is not None:
                    mod.bias.data.zero_()
            elif isinstance(mod, nn.BatchNorm2d):
                mod.weight.data.fill_(1)
                mod.bias.data.zero_()
            elif isinstance(mod, nn.Linear):
                mod.weight.data.normal_(0, 0.01)
                mod.bias.data.zero_()
def make_layers(cfg, batch_norm=False):
    """Translate a VGG config list into an ``nn.Sequential`` feature stack.

    Each integer entry adds a 3x3 conv (plus optional BatchNorm) followed by
    ReLU; the sentinel ``'M'`` inserts a 2x2 max-pool.
    """
    layers = []
    channels = 3
    for item in cfg:
        if item == 'M':
            layers.append(nn.MaxPool2d(kernel_size=2, stride=2))
            continue
        layers.append(nn.Conv2d(channels, item, kernel_size=3, padding=1))
        if batch_norm:
            layers.append(nn.BatchNorm2d(item))
        layers.append(nn.ReLU(inplace=True))
        channels = item
    return nn.Sequential(*layers)
# Standard VGG layer configurations (numbers = conv output channels,
# 'M' = 2x2 max-pool): A=VGG11, B=VGG13, D=VGG16, E=VGG19.
cfg = {
    'A': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
    'B': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
    'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
    'E': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],
}
The provided code snippet includes necessary dependencies for implementing the `vgg13` function. Write a Python function `def vgg13(**kwargs)` to solve the following problem:
VGG 13-layer model (configuration "B") Args: pretrained (bool): If True, returns a model pre-trained on ImageNet
Here is the function:
def vgg13(**kwargs):
    """VGG 13-layer model (configuration "B")."""
    return VGG(make_layers(cfg['B']), **kwargs)
153,399 | import torch.nn as nn
import torch.utils.model_zoo as model_zoo
import math
class VGG(nn.Module):
    """VGG for CIFAR-sized inputs: conv ``features`` + one linear classifier.

    The classifier expects the flattened feature map to have 512 elements per
    sample (the standard VGG configs end in 512 channels at 1x1).
    """

    def __init__(self, features, num_classes=1000):
        super(VGG, self).__init__()
        self.features = features
        self.classifier = nn.Linear(512, num_classes)
        self._initialize_weights()

    def forward(self, x):
        out = self.features(x)
        out = out.view(out.size(0), -1)
        return self.classifier(out)

    def _initialize_weights(self):
        """He init for convs, constants for BN, small-normal for linears."""
        for mod in self.modules():
            if isinstance(mod, nn.Conv2d):
                fan_out = mod.kernel_size[0] * mod.kernel_size[1] * mod.out_channels
                mod.weight.data.normal_(0, math.sqrt(2. / fan_out))
                if mod.bias is not None:
                    mod.bias.data.zero_()
            elif isinstance(mod, nn.BatchNorm2d):
                mod.weight.data.fill_(1)
                mod.bias.data.zero_()
            elif isinstance(mod, nn.Linear):
                mod.weight.data.normal_(0, 0.01)
                mod.bias.data.zero_()
def make_layers(cfg, batch_norm=False):
    """Translate a VGG config list into an ``nn.Sequential`` feature stack.

    Each integer entry adds a 3x3 conv (plus optional BatchNorm) followed by
    ReLU; the sentinel ``'M'`` inserts a 2x2 max-pool.
    """
    layers = []
    channels = 3
    for item in cfg:
        if item == 'M':
            layers.append(nn.MaxPool2d(kernel_size=2, stride=2))
            continue
        layers.append(nn.Conv2d(channels, item, kernel_size=3, padding=1))
        if batch_norm:
            layers.append(nn.BatchNorm2d(item))
        layers.append(nn.ReLU(inplace=True))
        channels = item
    return nn.Sequential(*layers)
# Standard VGG layer configurations (numbers = conv output channels,
# 'M' = 2x2 max-pool): A=VGG11, B=VGG13, D=VGG16, E=VGG19.
cfg = {
    'A': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
    'B': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
    'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
    'E': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],
}
The provided code snippet includes necessary dependencies for implementing the `vgg13_bn` function. Write a Python function `def vgg13_bn(**kwargs)` to solve the following problem:
VGG 13-layer model (configuration "B") with batch normalization
Here is the function:
def vgg13_bn(**kwargs):
    """VGG 13-layer model (configuration "B") with batch normalization"""
    return VGG(make_layers(cfg['B'], batch_norm=True), **kwargs)
153,400 | import torch.nn as nn
import torch.utils.model_zoo as model_zoo
import math
class VGG(nn.Module):
    """VGG for CIFAR-sized inputs: conv ``features`` + one linear classifier.

    The classifier expects the flattened feature map to have 512 elements per
    sample (the standard VGG configs end in 512 channels at 1x1).
    """

    def __init__(self, features, num_classes=1000):
        super(VGG, self).__init__()
        self.features = features
        self.classifier = nn.Linear(512, num_classes)
        self._initialize_weights()

    def forward(self, x):
        out = self.features(x)
        out = out.view(out.size(0), -1)
        return self.classifier(out)

    def _initialize_weights(self):
        """He init for convs, constants for BN, small-normal for linears."""
        for mod in self.modules():
            if isinstance(mod, nn.Conv2d):
                fan_out = mod.kernel_size[0] * mod.kernel_size[1] * mod.out_channels
                mod.weight.data.normal_(0, math.sqrt(2. / fan_out))
                if mod.bias is not None:
                    mod.bias.data.zero_()
            elif isinstance(mod, nn.BatchNorm2d):
                mod.weight.data.fill_(1)
                mod.bias.data.zero_()
            elif isinstance(mod, nn.Linear):
                mod.weight.data.normal_(0, 0.01)
                mod.bias.data.zero_()
def make_layers(cfg, batch_norm=False):
    """Translate a VGG config list into an ``nn.Sequential`` feature stack.

    Each integer entry adds a 3x3 conv (plus optional BatchNorm) followed by
    ReLU; the sentinel ``'M'`` inserts a 2x2 max-pool.
    """
    layers = []
    channels = 3
    for item in cfg:
        if item == 'M':
            layers.append(nn.MaxPool2d(kernel_size=2, stride=2))
            continue
        layers.append(nn.Conv2d(channels, item, kernel_size=3, padding=1))
        if batch_norm:
            layers.append(nn.BatchNorm2d(item))
        layers.append(nn.ReLU(inplace=True))
        channels = item
    return nn.Sequential(*layers)
# Standard VGG layer configurations (numbers = conv output channels,
# 'M' = 2x2 max-pool): A=VGG11, B=VGG13, D=VGG16, E=VGG19.
cfg = {
    'A': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
    'B': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
    'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
    'E': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],
}
The provided code snippet includes necessary dependencies for implementing the `vgg16` function. Write a Python function `def vgg16(**kwargs)` to solve the following problem:
VGG 16-layer model (configuration "D") Args: pretrained (bool): If True, returns a model pre-trained on ImageNet
Here is the function:
def vgg16(**kwargs):
    """VGG 16-layer model (configuration "D")."""
    return VGG(make_layers(cfg['D']), **kwargs)
153,401 | import torch.nn as nn
import torch.utils.model_zoo as model_zoo
import math
class VGG(nn.Module):
    """VGG for CIFAR-sized inputs: conv ``features`` + one linear classifier.

    The classifier expects the flattened feature map to have 512 elements per
    sample (the standard VGG configs end in 512 channels at 1x1).
    """

    def __init__(self, features, num_classes=1000):
        super(VGG, self).__init__()
        self.features = features
        self.classifier = nn.Linear(512, num_classes)
        self._initialize_weights()

    def forward(self, x):
        out = self.features(x)
        out = out.view(out.size(0), -1)
        return self.classifier(out)

    def _initialize_weights(self):
        """He init for convs, constants for BN, small-normal for linears."""
        for mod in self.modules():
            if isinstance(mod, nn.Conv2d):
                fan_out = mod.kernel_size[0] * mod.kernel_size[1] * mod.out_channels
                mod.weight.data.normal_(0, math.sqrt(2. / fan_out))
                if mod.bias is not None:
                    mod.bias.data.zero_()
            elif isinstance(mod, nn.BatchNorm2d):
                mod.weight.data.fill_(1)
                mod.bias.data.zero_()
            elif isinstance(mod, nn.Linear):
                mod.weight.data.normal_(0, 0.01)
                mod.bias.data.zero_()
def make_layers(cfg, batch_norm=False):
    """Translate a VGG config list into an ``nn.Sequential`` feature stack.

    Each integer entry adds a 3x3 conv (plus optional BatchNorm) followed by
    ReLU; the sentinel ``'M'`` inserts a 2x2 max-pool.
    """
    layers = []
    channels = 3
    for item in cfg:
        if item == 'M':
            layers.append(nn.MaxPool2d(kernel_size=2, stride=2))
            continue
        layers.append(nn.Conv2d(channels, item, kernel_size=3, padding=1))
        if batch_norm:
            layers.append(nn.BatchNorm2d(item))
        layers.append(nn.ReLU(inplace=True))
        channels = item
    return nn.Sequential(*layers)
# Standard VGG layer configurations (numbers = conv output channels,
# 'M' = 2x2 max-pool): A=VGG11, B=VGG13, D=VGG16, E=VGG19.
cfg = {
    'A': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
    'B': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
    'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
    'E': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],
}
The provided code snippet includes necessary dependencies for implementing the `vgg16_bn` function. Write a Python function `def vgg16_bn(**kwargs)` to solve the following problem:
VGG 16-layer model (configuration "D") with batch normalization
Here is the function:
def vgg16_bn(**kwargs):
    """VGG 16-layer model (configuration "D") with batch normalization"""
    return VGG(make_layers(cfg['D'], batch_norm=True), **kwargs)
153,402 | import torch.nn as nn
import torch.utils.model_zoo as model_zoo
import math
class VGG(nn.Module):
    """VGG for CIFAR-sized inputs: conv ``features`` + one linear classifier.

    The classifier expects the flattened feature map to have 512 elements per
    sample (the standard VGG configs end in 512 channels at 1x1).
    """

    def __init__(self, features, num_classes=1000):
        super(VGG, self).__init__()
        self.features = features
        self.classifier = nn.Linear(512, num_classes)
        self._initialize_weights()

    def forward(self, x):
        out = self.features(x)
        out = out.view(out.size(0), -1)
        return self.classifier(out)

    def _initialize_weights(self):
        """He init for convs, constants for BN, small-normal for linears."""
        for mod in self.modules():
            if isinstance(mod, nn.Conv2d):
                fan_out = mod.kernel_size[0] * mod.kernel_size[1] * mod.out_channels
                mod.weight.data.normal_(0, math.sqrt(2. / fan_out))
                if mod.bias is not None:
                    mod.bias.data.zero_()
            elif isinstance(mod, nn.BatchNorm2d):
                mod.weight.data.fill_(1)
                mod.bias.data.zero_()
            elif isinstance(mod, nn.Linear):
                mod.weight.data.normal_(0, 0.01)
                mod.bias.data.zero_()
def make_layers(cfg, batch_norm=False):
    """Build the VGG convolutional trunk from a config list.

    Each integer entry v appends Conv2d(prev, v, 3, padding=1), an
    optional BatchNorm2d(v), and an in-place ReLU; each 'M' entry
    appends a 2x2 stride-2 max-pool.  The first conv takes 3 channels.
    Returns the modules wrapped in an nn.Sequential.
    """
    modules = []
    channels = 3
    for spec in cfg:
        if spec == 'M':
            modules.append(nn.MaxPool2d(kernel_size=2, stride=2))
            continue
        modules.append(nn.Conv2d(channels, spec, kernel_size=3, padding=1))
        if batch_norm:
            modules.append(nn.BatchNorm2d(spec))
        modules.append(nn.ReLU(inplace=True))
        channels = spec
    return nn.Sequential(*modules)
# VGG layer configurations: each int v adds a 3x3 conv with v output
# channels (see make_layers); 'M' marks a 2x2/stride-2 max-pool stage.
# 'D' is the 16-layer config and 'E' the 19-layer config used by the
# builders in this file; 'A'/'B' presumably map to VGG-11/VGG-13 -- unused here.
cfg = {
    'A': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
    'B': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
    'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
    'E': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],
}
The provided code snippet includes necessary dependencies for implementing the `vgg19` function. Write a Python function `def vgg19(**kwargs)` to solve the following problem:
VGG 19-layer model (configuration "E") Args: pretrained (bool): If True, returns a model pre-trained on ImageNet
Here is the function:
def vgg19(**kwargs):
"""VGG 19-layer model (configuration "E")
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = VGG(make_layers(cfg['E']), **kwargs)
return model | VGG 19-layer model (configuration "E") Args: pretrained (bool): If True, returns a model pre-trained on ImageNet |
153,403 | import torch.nn as nn
import torch.utils.model_zoo as model_zoo
import math
class VGG(nn.Module):
    # Generic VGG: a caller-supplied convolutional trunk (`features`)
    # followed by a single linear classifier.  The flattened trunk
    # output must be 512 values per sample to match
    # nn.Linear(512, num_classes) -- a compact single-layer head.
    def __init__(self, features, num_classes=1000):
        # features: nn.Module producing the conv feature maps
        # num_classes: size of the classifier output
        super(VGG, self).__init__()
        self.features = features
        self.classifier = nn.Linear(512, num_classes)
        self._initialize_weights()
    def forward(self, x):
        # Run the trunk, flatten each sample, then classify.
        x = self.features(x)
        x = x.view(x.size(0), -1)
        x = self.classifier(x)
        return x
    def _initialize_weights(self):
        # He-style init for convs, identity init for BN, small-normal for Linear.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                # std = sqrt(2 / fan_out), fan_out = kH * kW * out_channels
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                n = m.weight.size(1)  # NOTE(review): computed but unused
                m.weight.data.normal_(0, 0.01)
                m.bias.data.zero_()
def make_layers(cfg, batch_norm=False):
    # Build the VGG conv trunk from a config list: each int v adds
    # Conv2d(in, v, 3, padding=1) (+ optional BatchNorm2d) + ReLU;
    # each 'M' adds a 2x2/stride-2 max-pool.  Input has 3 channels.
    layers = []
    in_channels = 3
    for v in cfg:
        if v == 'M':
            layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
        else:
            conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
            if batch_norm:
                layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
            else:
                layers += [conv2d, nn.ReLU(inplace=True)]
            in_channels = v
    return nn.Sequential(*layers)
# VGG layer configurations: each int v adds a 3x3 conv with v output
# channels (see make_layers); 'M' marks a 2x2/stride-2 max-pool stage.
# 'D' is the 16-layer config and 'E' the 19-layer config used by the
# builders in this file; 'A'/'B' presumably map to VGG-11/VGG-13 -- unused here.
cfg = {
    'A': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
    'B': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
    'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
    'E': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],
}
The provided code snippet includes necessary dependencies for implementing the `vgg19_bn` function. Write a Python function `def vgg19_bn(**kwargs)` to solve the following problem:
VGG 19-layer model (configuration 'E') with batch normalization
Here is the function:
def vgg19_bn(**kwargs):
"""VGG 19-layer model (configuration 'E') with batch normalization"""
model = VGG(make_layers(cfg['E'], batch_norm=True), **kwargs)
return model | VGG 19-layer model (configuration 'E') with batch normalization |
153,404 | from __future__ import division
import math
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import init
import torch
class ResNeXt(nn.Module):
    """
    ResNext optimized for the ImageNet dataset, as specified in
    https://arxiv.org/pdf/1611.05431.pdf
    """
    def __init__(self, baseWidth, cardinality, layers, num_classes):
        """ Constructor
        Args:
            baseWidth: baseWidth for ResNeXt.
            cardinality: number of convolution groups.
            layers: config of layers, e.g., [3, 4, 6, 3]
            num_classes: number of classes
        """
        super(ResNeXt, self).__init__()
        # NOTE(review): Bottleneck is the grouped-conv bottleneck block,
        # defined elsewhere in this module.
        block = Bottleneck
        self.cardinality = cardinality
        self.baseWidth = baseWidth
        self.num_classes = num_classes
        self.inplanes = 64
        self.output_size = 64
        # Stem: 7x7/stride-2 conv + BN + ReLU + 3x3/stride-2 max-pool.
        self.conv1 = nn.Conv2d(3, 64, 7, 2, 3, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool1 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        # Four residual stages; stages 2-4 also halve spatial size (stride 2).
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], 2)
        self.layer3 = self._make_layer(block, 256, layers[2], 2)
        self.layer4 = self._make_layer(block, 512, layers[3], 2)
        # AvgPool2d(7) assumes a 7x7 final feature map (224x224 input) --
        # TODO confirm for other input sizes.
        self.avgpool = nn.AvgPool2d(7)
        self.fc = nn.Linear(512 * block.expansion, num_classes)
        # He init for convs (std = sqrt(2 / fan_out)); BN scaled to identity.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
    def _make_layer(self, block, planes, blocks, stride=1):
        """ Stack n bottleneck modules where n is inferred from the depth of the network.
        Args:
            block: block type used to construct ResNext
            planes: number of output channels (need to multiply by block.expansion)
            blocks: number of blocks to be built
            stride: factor to reduce the spatial dimensionality in the first bottleneck of the block.
        Returns: a Module consisting of n sequential bottlenecks.
        """
        downsample = None
        # Project the identity path with a 1x1 conv + BN whenever the
        # first block changes spatial size or channel width.
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )
        layers = []
        layers.append(block(self.inplanes, planes, self.baseWidth, self.cardinality, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes, self.baseWidth, self.cardinality))
        return nn.Sequential(*layers)
    def forward(self, x):
        # Stem -> 4 residual stages -> global average pool -> classifier.
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool1(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x)
        return x
The provided code snippet includes necessary dependencies for implementing the `resnext50` function. Write a Python function `def resnext50(baseWidth, cardinality)` to solve the following problem:
Construct ResNeXt-50.
Here is the function:
def resnext50(baseWidth, cardinality):
"""
Construct ResNeXt-50.
"""
model = ResNeXt(baseWidth, cardinality, [3, 4, 6, 3], 1000)
return model | Construct ResNeXt-50. |
153,405 | from __future__ import division
import math
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import init
import torch
class ResNeXt(nn.Module):
"""
ResNext optimized for the ImageNet dataset, as specified in
https://arxiv.org/pdf/1611.05431.pdf
"""
def __init__(self, baseWidth, cardinality, layers, num_classes):
""" Constructor
Args:
baseWidth: baseWidth for ResNeXt.
cardinality: number of convolution groups.
layers: config of layers, e.g., [3, 4, 6, 3]
num_classes: number of classes
"""
super(ResNeXt, self).__init__()
block = Bottleneck
self.cardinality = cardinality
self.baseWidth = baseWidth
self.num_classes = num_classes
self.inplanes = 64
self.output_size = 64
self.conv1 = nn.Conv2d(3, 64, 7, 2, 3, bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool1 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], 2)
self.layer3 = self._make_layer(block, 256, layers[2], 2)
self.layer4 = self._make_layer(block, 512, layers[3], 2)
self.avgpool = nn.AvgPool2d(7)
self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1):
""" Stack n bottleneck modules where n is inferred from the depth of the network.
Args:
block: block type used to construct ResNext
planes: number of output channels (need to multiply by block.expansion)
blocks: number of blocks to be built
stride: factor to reduce the spatial dimensionality in the first bottleneck of the block.
Returns: a Module consisting of n sequential bottlenecks.
"""
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, self.baseWidth, self.cardinality, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes, self.baseWidth, self.cardinality))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool1(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
The provided code snippet includes necessary dependencies for implementing the `resnext101` function. Write a Python function `def resnext101(baseWidth, cardinality)` to solve the following problem:
Construct ResNeXt-101.
Here is the function:
def resnext101(baseWidth, cardinality):
"""
Construct ResNeXt-101.
"""
model = ResNeXt(baseWidth, cardinality, [3, 4, 23, 3], 1000)
return model | Construct ResNeXt-101. |
153,406 | from __future__ import division
import math
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import init
import torch
class ResNeXt(nn.Module):
"""
ResNext optimized for the ImageNet dataset, as specified in
https://arxiv.org/pdf/1611.05431.pdf
"""
def __init__(self, baseWidth, cardinality, layers, num_classes):
""" Constructor
Args:
baseWidth: baseWidth for ResNeXt.
cardinality: number of convolution groups.
layers: config of layers, e.g., [3, 4, 6, 3]
num_classes: number of classes
"""
super(ResNeXt, self).__init__()
block = Bottleneck
self.cardinality = cardinality
self.baseWidth = baseWidth
self.num_classes = num_classes
self.inplanes = 64
self.output_size = 64
self.conv1 = nn.Conv2d(3, 64, 7, 2, 3, bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool1 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], 2)
self.layer3 = self._make_layer(block, 256, layers[2], 2)
self.layer4 = self._make_layer(block, 512, layers[3], 2)
self.avgpool = nn.AvgPool2d(7)
self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1):
""" Stack n bottleneck modules where n is inferred from the depth of the network.
Args:
block: block type used to construct ResNext
planes: number of output channels (need to multiply by block.expansion)
blocks: number of blocks to be built
stride: factor to reduce the spatial dimensionality in the first bottleneck of the block.
Returns: a Module consisting of n sequential bottlenecks.
"""
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, self.baseWidth, self.cardinality, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes, self.baseWidth, self.cardinality))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool1(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
The provided code snippet includes necessary dependencies for implementing the `resnext152` function. Write a Python function `def resnext152(baseWidth, cardinality)` to solve the following problem:
Construct ResNeXt-152.
Here is the function:
def resnext152(baseWidth, cardinality):
"""
Construct ResNeXt-152.
"""
model = ResNeXt(baseWidth, cardinality, [3, 8, 36, 3], 1000)
return model | Construct ResNeXt-152. |
153,407 | import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
import numpy as np
from .misc import *
def gauss(x, a, b, c):
    """Elementwise Gaussian a * exp(-(x - b)^2 / (2 * c^2)) over a tensor."""
    return a * torch.exp(-((x - b) ** 2) / (2 * c * c))
import torch
import torch.nn as nn
import torch.nn.init as init
from torch.autograd import Variable
The provided code snippet includes necessary dependencies for implementing the `colorize` function. Write a Python function `def colorize(x)` to solve the following problem:
Converts a one-channel grayscale image to a color heatmap image
Here is the function:
def colorize(x):
''' Converts a one-channel grayscale image to a color heatmap image '''
if x.dim() == 2:
torch.unsqueeze(x, 0, out=x)
if x.dim() == 3:
cl = torch.zeros([3, x.size(1), x.size(2)])
cl[0] = gauss(x,.5,.6,.2) + gauss(x,1,.8,.3)
cl[1] = gauss(x,1,.5,.3)
cl[2] = gauss(x,1,.2,.3)
cl[cl.gt(1)] = 1
elif x.dim() == 4:
cl = torch.zeros([x.size(0), 3, x.size(2), x.size(3)])
cl[:,0,:,:] = gauss(x,.5,.6,.2) + gauss(x,1,.8,.3)
cl[:,1,:,:] = gauss(x,1,.5,.3)
cl[:,2,:,:] = gauss(x,1,.2,.3)
return cl | Converts a one-channel grayscale image to a color heatmap image |
153,408 | import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
import numpy as np
from .misc import *
def make_image(img, mean=(0,0,0), std=(1,1,1)):
    """Undo per-channel normalization and return an HWC numpy image.

    Args:
        img: 3-channel CHW float tensor previously normalized as
            (x - mean) / std.
        mean, std: per-channel statistics used for the normalization.

    Returns:
        numpy array of shape (H, W, 3).
    """
    # Fix: work on a copy -- the original mutated the caller's tensor
    # in place, a surprising side effect no caller relies on.
    img = img.clone()
    for i in range(0, 3):
        img[i] = img[i] * std[i] + mean[i]  # unnormalize channel i
    npimg = img.numpy()
    return np.transpose(npimg, (1, 2, 0))
def show_batch(images, Mean=(2, 2, 2), Std=(0.5,0.5,0.5)):
images = make_image(torchvision.utils.make_grid(images), Mean, Std)
plt.imshow(images)
plt.show() | null |
153,409 | import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
import numpy as np
from .misc import *
def make_image(img, mean=(0,0,0), std=(1,1,1)):
def show_mask_single(images, mask, Mean=(2, 2, 2), Std=(0.5,0.5,0.5)):
im_size = images.size(2)
# save for adding mask
im_data = images.clone()
for i in range(0, 3):
im_data[:,i,:,:] = im_data[:,i,:,:] * Std[i] + Mean[i] # unnormalize
images = make_image(torchvision.utils.make_grid(images), Mean, Std)
plt.subplot(2, 1, 1)
plt.imshow(images)
plt.axis('off')
# for b in range(mask.size(0)):
# mask[b] = (mask[b] - mask[b].min())/(mask[b].max() - mask[b].min())
mask_size = mask.size(2)
# print('Max %f Min %f' % (mask.max(), mask.min()))
mask = (upsampling(mask, scale_factor=im_size/mask_size))
# mask = colorize(upsampling(mask, scale_factor=im_size/mask_size))
# for c in range(3):
# mask[:,c,:,:] = (mask[:,c,:,:] - Mean[c])/Std[c]
# print(mask.size())
mask = make_image(torchvision.utils.make_grid(0.3*im_data+0.7*mask.expand_as(im_data)))
# mask = make_image(torchvision.utils.make_grid(0.3*im_data+0.7*mask), Mean, Std)
plt.subplot(2, 1, 2)
plt.imshow(mask)
plt.axis('off') | null |
153,410 | import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
import numpy as np
from .misc import *
def make_image(img, mean=(0,0,0), std=(1,1,1)):
def show_mask(images, masklist, Mean=(2, 2, 2), Std=(0.5,0.5,0.5)):
im_size = images.size(2)
# save for adding mask
im_data = images.clone()
for i in range(0, 3):
im_data[:,i,:,:] = im_data[:,i,:,:] * Std[i] + Mean[i] # unnormalize
images = make_image(torchvision.utils.make_grid(images), Mean, Std)
plt.subplot(1+len(masklist), 1, 1)
plt.imshow(images)
plt.axis('off')
for i in range(len(masklist)):
mask = masklist[i].data.cpu()
# for b in range(mask.size(0)):
# mask[b] = (mask[b] - mask[b].min())/(mask[b].max() - mask[b].min())
mask_size = mask.size(2)
# print('Max %f Min %f' % (mask.max(), mask.min()))
mask = (upsampling(mask, scale_factor=im_size/mask_size))
# mask = colorize(upsampling(mask, scale_factor=im_size/mask_size))
# for c in range(3):
# mask[:,c,:,:] = (mask[:,c,:,:] - Mean[c])/Std[c]
# print(mask.size())
mask = make_image(torchvision.utils.make_grid(0.3*im_data+0.7*mask.expand_as(im_data)))
# mask = make_image(torchvision.utils.make_grid(0.3*im_data+0.7*mask), Mean, Std)
plt.subplot(1+len(masklist), 1, i+2)
plt.imshow(mask)
plt.axis('off') | null |
153,411 | import errno
import os
import sys
import time
import math
import torch
import torch.nn as nn
import torch.nn.init as init
from torch.autograd import Variable
The provided code snippet includes necessary dependencies for implementing the `get_mean_and_std` function. Write a Python function `def get_mean_and_std(dataset)` to solve the following problem:
Compute the mean and std value of dataset.
Here is the function:
def get_mean_and_std(dataset):
'''Compute the mean and std value of dataset.'''
dataloader = trainloader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=True, num_workers=2)
mean = torch.zeros(3)
std = torch.zeros(3)
print('==> Computing mean and std..')
for inputs, targets in dataloader:
for i in range(3):
mean[i] += inputs[:,i,:,:].mean()
std[i] += inputs[:,i,:,:].std()
mean.div_(len(dataset))
std.div_(len(dataset))
return mean, std | Compute the mean and std value of dataset. |
153,412 | import errno
import os
import sys
import time
import math
import torch
import torch.nn as nn
import torch.nn.init as init
from torch.autograd import Variable
The provided code snippet includes necessary dependencies for implementing the `init_params` function. Write a Python function `def init_params(net)` to solve the following problem:
Init layer parameters.
Here is the function:
def init_params(net):
'''Init layer parameters.'''
for m in net.modules():
if isinstance(m, nn.Conv2d):
init.kaiming_normal(m.weight, mode='fan_out')
if m.bias:
init.constant(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
init.constant(m.weight, 1)
init.constant(m.bias, 0)
elif isinstance(m, nn.Linear):
init.normal(m.weight, std=1e-3)
if m.bias:
init.constant(m.bias, 0) | Init layer parameters. |
153,413 | import errno
import os
import sys
import time
import math
import torch
import torch.nn as nn
import torch.nn.init as init
from torch.autograd import Variable
The provided code snippet includes necessary dependencies for implementing the `mkdir_p` function. Write a Python function `def mkdir_p(path)` to solve the following problem:
make dir if not exist
Here is the function:
def mkdir_p(path):
'''make dir if not exist'''
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise | make dir if not exist |
153,414 | from __future__ import print_function, absolute_import
The provided code snippet includes necessary dependencies for implementing the `accuracy` function. Write a Python function `def accuracy(output, target, topk=(1,))` to solve the following problem:
Computes the precision@k for the specified values of k
Here is the function:
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0)
res.append(correct_k.mul_(100.0 / batch_size))
return res | Computes the precision@k for the specified values of k |
153,415 | from __future__ import absolute_import
import matplotlib as mpl
import matplotlib.pyplot as plt
import os
import sys
import numpy as np
def savefig(fname, dpi=None):
dpi = 150 if dpi == None else dpi
plt.savefig(fname, dpi=dpi) | null |
153,416 | from __future__ import absolute_import
import matplotlib as mpl
import matplotlib.pyplot as plt
import os
import sys
import numpy as np
def plot_overlap(logger, names=None):
names = logger.names if names == None else names
numbers = logger.numbers
for _, name in enumerate(names):
x = np.arange(len(numbers[name]))
plt.plot(x, np.asarray(numbers[name]))
return [logger.title + '(' + name + ')' for name in names] | null |
153,417 | import argparse
import collections
import torch
import os
import re
The provided code snippet includes necessary dependencies for implementing the `average_checkpoints` function. Write a Python function `def average_checkpoints(inputs)` to solve the following problem:
Loads checkpoints from inputs and returns a model with averaged weights. Args: inputs: An iterable of string paths of checkpoints to load from. Returns: A dict of string keys mapping to various values. The 'model' key from the returned dict should correspond to an OrderedDict mapping string parameter names to torch Tensors.
Here is the function:
def average_checkpoints(inputs):
"""Loads checkpoints from inputs and returns a model with averaged weights.
Args:
inputs: An iterable of string paths of checkpoints to load from.
Returns:
A dict of string keys mapping to various values. The 'model' key
from the returned dict should correspond to an OrderedDict mapping
string parameter names to torch Tensors.
"""
params_dict = collections.OrderedDict()
params_keys = None
new_state = None
num_models = len(inputs)
for f in inputs:
state = torch.load(
f,
map_location=(
lambda s, _: torch.serialization.default_restore_location(s, 'cpu')
),
)
# Copies over the settings from the first checkpoint
if new_state is None:
new_state = state
model_params = state['model']
model_params_keys = list(model_params.keys())
if params_keys is None:
params_keys = model_params_keys
elif params_keys != model_params_keys:
raise KeyError(
'For checkpoint {}, expected list of params: {}, '
'but found: {}'.format(f, params_keys, model_params_keys)
)
for k in params_keys:
p = model_params[k]
if isinstance(p, torch.HalfTensor):
p = p.float()
if k not in params_dict:
params_dict[k] = p.clone()
# NOTE: clone() is needed in case of p is a shared parameter
else:
params_dict[k] += p
averaged_params = collections.OrderedDict()
for k, v in params_dict.items():
averaged_params[k] = v
averaged_params[k].div_(num_models)
new_state['model'] = averaged_params
return new_state | Loads checkpoints from inputs and returns a model with averaged weights. Args: inputs: An iterable of string paths of checkpoints to load from. Returns: A dict of string keys mapping to various values. The 'model' key from the returned dict should correspond to an OrderedDict mapping string parameter names to torch Tensors. |
153,418 | import argparse
import collections
import torch
import os
import re
def last_n_checkpoints(paths, n, update_based, upper_bound=None):
assert len(paths) == 1
path = paths[0]
if update_based:
pt_regexp = re.compile(r'checkpoint_\d+_(\d+)\.pt')
else:
pt_regexp = re.compile(r'checkpoint(\d+)\.pt')
files = os.listdir(path)
entries = []
for f in files:
m = pt_regexp.fullmatch(f)
if m is not None:
sort_key = int(m.group(1))
if upper_bound is None or sort_key <= upper_bound:
entries.append((sort_key, m.group(0)))
if len(entries) < n:
raise Exception('Found {} checkpoint files but need at least {}', len(entries), n)
return [os.path.join(path, x[1]) for x in sorted(entries, reverse=True)[:n]] | null |
153,419 | from jinja2 import Environment, StrictUndefined
from tiktoken import encoding_for_model, get_encoding
from alpha_codium.settings.config_loader import get_settings
def get_settings():
    # Accessor for the module-level `global_settings` singleton
    # (defined elsewhere in this module).
    return global_settings
def get_token_encoder():
return (
encoding_for_model(get_settings().config.model)
if "gpt" in get_settings().config.model
else get_encoding("cl100k_base")
) | null |
153,420 | import argparse
import json
from collections import OrderedDict
from alpha_codium.code_contests.data.provider import CodeContestDataProvider
from alpha_codium.log import get_logger
class CodeContestDataProvider:
def __init__(self, dataset_location, connection=None):
self.private_datasets_root = os.path.expanduser(
get_settings().config.private_dataset_cache_dir
)
(
self.dataset_location,
self.dataset_name,
self.load_from_disk,
) = self.parse_location(dataset_location)
self.dataset = self.load_dataset()
self.connection = connection or duckdb.connect()
self.connect(self.dataset)
def find_problem(ds, problem_name, split_name=None, evaluation_test_type = None):
if split_name:
ds = ds[split_name]
example = None
if not problem_name:
for e in ds:
if evaluation_test_type:
tests = e.get(evaluation_test_type)
if tests and tests.get("input"):
example = e
break
else:
example = e
break
else:
problems = ds.filter(lambda example: example['name'] == problem_name)
if problems:
example = problems[0]
else:
raise ValueError(
f"problem with name {problem_name} doesn't exist in dataset {ds.info.dataset_name} in split {split_name}")
return example
def prepare_for_evaluation(
predictions, source_of_truth, evaluation_test_type
):
preds = predictions
sot = source_of_truth
sot = sot.select_columns(["name", evaluation_test_type])
sot = sot.rename_column("name", "task_name")
sot = sot.flatten()
sot = sot.rename_column(f"{evaluation_test_type}.input", "tests_inputs")
sot = sot.rename_column(f"{evaluation_test_type}.output", "tests_outputs")
joined = sot.to_pandas().merge(preds.to_pandas(), on="task_name", how="left")
joined["predictions"] = joined[["task_name", "solution_candidates"]].to_dict(
"records"
)
joined["references"] = joined[["tests_inputs", "tests_outputs"]].to_dict(
"records"
)
# Retain only the 'predictions' and 'references' columns
joined = joined[["predictions", "references"]]
restructured_dataset = Dataset.from_pandas(joined)
return restructured_dataset
def parse_location(self, dataset_location):
result_location = dataset_location
dataset_name = dataset_location.split(os.path.sep)[-1]
load_from_disk = True
if load_from_disk:
if not result_location.startswith(os.path.sep):
result_location = os.path.join(
self.private_datasets_root, result_location
)
return result_location, dataset_name, load_from_disk
def prepare_code_contest_split_for_eval(
ds, evaluation_test_type="public_tests", task_name_column="name",
path_to_solutions_column="solutions.solution"
):
solutions = ds.flatten()
solutions = solutions.rename_column(
path_to_solutions_column, "solution_candidates"
)
solutions = solutions.rename_column(task_name_column, "task_name")
solutions = solutions.select_columns(["task_name", "solution_candidates"])
return CodeContestDataProvider.prepare_for_evaluation(
predictions=solutions,
source_of_truth=ds,
evaluation_test_type=evaluation_test_type,
)
def show(self, ds, paths_to_python, paths_to_free_text):
result = ds.flatte()
def format_example(example):
for code_col in paths_to_python:
import black
example[code_col] = black.format_str(example[code_col])
for col in paths_to_free_text:
example[col] = example[col].replace("\\n", "\n")
pretty = result.map(format_example)
return pretty
def load_dataset(self):
if self.load_from_disk:
f = load_from_disk
else:
f = load_dataset
return f(self.dataset_location)
def connect(self, ds):
if hasattr(ds, "keys"):
for split in self.dataset.keys():
split_ds = self.dataset[split]
table = split_ds.data.table
self.connection.register(f"{split_ds.info.dataset_name}_{split}", table)
else:
self.connection.register(f"{ds.info.dataset_name}", ds.data.table)
def get_splits(self):
return self.dataset.keys()
def sample(ds, fraction=0.1):
table = ds
sample_size = int(len(table) * fraction)
indices = np.random.choice(len(table), sample_size, replace=False)
sampled_table = table.select(indices)
return sampled_table
def query(self, query_string) -> pd.DataFrame:
return self.connection.query(query_string).df()
def translate_references(self, ds):
expand = False
if not isinstance(ds, DatasetDict):
to_translate = {"ds": ds}
expand = True
else:
to_translate = ds
for ds_name, ds_val in to_translate.items():
for col in problem_translations:
translated_col = ds_val.features[col].int2str(ds_val[col])
ds_val = ds_val.remove_columns([col])
ds_val = ds_val.add_column(col, translated_col)
def translate_sequence_references(example, ds):
for col in solution_translations:
translator = ds.features[col].feature["language"]
arr = example[col]["language"]
translated_solution = [translator.int2str(item) for item in arr]
example[col]["language"] = translated_solution
return example
new_features = ds_val.features.copy()
for col in solution_translations:
new_features[col] = Sequence(
feature={"language": Value("string"), "solution": Value("string")}
)
ds_val = ds_val.map(
lambda example, ds=ds_val: translate_sequence_references(
example=example, ds=ds
),
features=new_features,
)
to_translate[ds_name] = ds_val
result = to_translate
if expand:
result = result[ds]
return result
def filter_solution_by_languages(self, ds, languages: Iterable[str], keep=True):
languages_set = set(languages)
def filter_solutions_by_languages(example):
for sol_col in solution_translations:
langs = example[sol_col]["language"]
sols = example[sol_col]["solution"]
filtered_languages = [
lang for lang in langs if (lang in languages_set) == keep
]
filtered_solutions = [
s
for idx, s in enumerate(sols)
if (langs[idx] in languages_set) == keep
]
example[sol_col] = {
"language": filtered_languages,
"solution": filtered_solutions,
}
return example
ds = ds.map(filter_solutions_by_languages)
return ds
The provided code snippet includes necessary dependencies for implementing the `evaluate_dataset_solution` function. Write a Python function `def evaluate_dataset_solution(dataset_name='valid_and_test_processed', split_name='test', solution_path_database='valid_database_solution.json')` to solve the following problem:
Evaluate the performance of dataset solutions. Args: dataset_name (str, optional): The name of the dataset. Defaults to 'valid_and_test_processed'. split_name (str, optional): The name of the split. Defaults to 'test'. solution_path_database (str, optional): The path to the solution database file. Defaults to 'valid_database_solution.json'.
Here is the function:
def evaluate_dataset_solution(dataset_name='valid_and_test_processed',
split_name='test',
solution_path_database='valid_database_solution.json'):
"""
Evaluate the performance of dataset solutions.
Args:
dataset_name (str, optional): The name of the dataset. Defaults to 'valid_and_test_processed'.
split_name (str, optional): The name of the split. Defaults to 'test'.
solution_path_database (str, optional): The path to the solution database file. Defaults to 'valid_database_solution.json'.
"""
# Load the dataset and solution database
data_provider = CodeContestDataProvider(dataset_location=dataset_name)
ds = data_provider.dataset[split_name]
with open(solution_path_database, 'r') as f:
database_solutions = json.load(f)
database_solutions[split_name] = OrderedDict(
sorted(database_solutions[split_name].items(), key=lambda x: int(x[0])))
# Initialize counters for passed and failed problems
total_passed = 0
total_failed = 0
# Iterate over the solutions in the database
for sol in database_solutions[split_name]:
try:
key_str = sol
key_int = int(key_str)
problem = ds[key_int]
if problem.get('is_valid_problem', True) is False:
print(f"problem {key_int} is not valid")
continue
solution = database_solutions[split_name][sol]
passed_current = -1
# scanning the iterations
v_iter =[v for v in solution.values() if (v is not None and 'solution' in v)]
for v in v_iter:
if not v:
continue
test_failed_generate = v['test_failed_generate']
test_failed_private = v['test_failed_private']
test_passed_generate = v['test_passed_generate']
test_passed_private = v['test_passed_private']
if 'test_timeout_generate' in v:
test_timeout_generate = v['test_timeout_generate']
test_timeout_private = v['test_timeout_private']
else:
test_timeout_generate = 0
test_timeout_private = 0
if ((test_failed_generate + test_timeout_generate + test_failed_private + test_timeout_private) == 0 and
(test_passed_generate + test_passed_private) > 0):
print(f"problem {key_int} passed all tests")
passed_current=1
break
else:
passed_current = 0
if passed_current == 1:
total_passed += 1
elif passed_current == 0:
total_failed += 1
except Exception as e:
print(f"Error: {e}")
pass
# Print the total number of passed and failed problems
print(f"total_passed: {total_passed}, total_failed: {total_failed}")
# Calculate the pass rate
pass_rate = total_passed / (total_passed + total_failed)
print(f"pass rate: {pass_rate}") | Evaluate the performance of dataset solutions. Args: dataset_name (str, optional): The name of the dataset. Defaults to 'valid_and_test_processed'. split_name (str, optional): The name of the split. Defaults to 'test'. solution_path_database (str, optional): The path to the solution database file. Defaults to 'valid_database_solution.json'. |
153,421 | import re
from typing import List
import yaml
from alpha_codium.code_contests.eval.code_test_runners import eval_solution
from alpha_codium.settings.config_loader import get_settings
from alpha_codium.log import get_logger
def clip_string(s: str, max_lines: int = None):
    """Return *s* shortened to roughly ``max_lines`` lines.

    When ``max_lines`` is None, non-positive, or not smaller than the number
    of lines in *s*, the string is returned unchanged. Otherwise the first
    and last ``max_lines // 2`` lines are kept with an "omitted lines"
    marker in between.
    """
    all_lines = s.split("\n")
    total = len(all_lines)
    # Guard clause: nothing to clip.
    if max_lines is None or not (0 < max_lines < total):
        return s
    logger.debug(f"clipping string from {total} to {max_lines}")
    keep = int(max_lines / 2)
    marker = f"\n.... {total - max_lines} omitted lines ....\n"
    return "\n".join(all_lines[:keep] + [marker] + all_lines[-keep:])
def get_settings():
    # Shim: return the process-wide settings object.
    # NOTE(review): `global_settings` is defined elsewhere in the file/module —
    # confirm it is in scope at call time.
    return global_settings
def render_trace(trace_data):
    """Return *trace_data* clipped to the configured `max_trace_lines`.

    Empty/None input yields an empty string.
    """
    if not trace_data:
        return ''
    line_budget = get_settings().code_tester.get("max_trace_lines")
    return clip_string(trace_data, line_budget)
153,422 | import re
from typing import List, Optional

import yaml

from alpha_codium.code_contests.eval.code_test_runners import eval_solution
from alpha_codium.log import get_logger
from alpha_codium.settings.config_loader import get_settings
logger = get_logger(__name__)
def eval_solution(evaluation_test_type: str = "private_tests",
                  example: Optional[dict] = None,  # the code contest problem
                  prediction: str = '',  # python code to be evaluated
                  test_inputs: Optional[List[str]] = None,
                  test_outputs: Optional[List[str]] = None,
                  silent=False,
                  break_on_timeout=False):
    """Run `prediction` against one group of a problem's tests.

    Test inputs/outputs are taken from `example[evaluation_test_type]` unless
    explicitly provided; tests flagged invalid via 'is_valid_test' are dropped.

    Returns:
        (test_inputs, results) where `results` is the runner's result object,
        or (test_inputs, []) when there are no usable tests.
    """
    # Fix: the original used a mutable default argument (`example: dict = {}`),
    # which is shared across calls; use None and substitute a fresh dict.
    if example is None:
        example = {}
    tests = example.get(evaluation_test_type)
    if not test_inputs:
        test_inputs = tests.get("input") if tests else None
    if not test_outputs:
        test_outputs = tests.get("output") if tests else None
    # Drop tests marked invalid during dataset preprocessing.
    is_valid_test_list = tests.get("is_valid_test") if tests else None
    if is_valid_test_list:
        test_inputs = [test_inputs[i] for i, is_valid in enumerate(is_valid_test_list) if is_valid]
        test_outputs = [test_outputs[i] for i, is_valid in enumerate(is_valid_test_list) if is_valid]
    if test_inputs and test_outputs:
        test_runner = PythonTestsRunner.factory(get_settings().code_tester.tester_type)
        _, _, results = test_runner.run_tests(
            test_id=example["name"],
            candidate_id="id",
            candidate=prediction,
            test_inputs=test_inputs,
            tests_outputs=test_outputs,
            timeout=3,  # seconds per test
            break_on_timeout=break_on_timeout,
        )
        if not silent:
            test_runner.print_test_results(results, test_inputs)
        return test_inputs, results
    else:
        # No tests of this type for the problem.
        return test_inputs, []
def evaluate_on_private_tests(evaluation_test_type, problem, solution, silent=True):
    """Evaluate `solution` on one of the problem's test groups and tally outcomes.

    Returns:
        (test_results, test_passed, test_failed, test_timeout) — always a
        4-tuple.
    """
    test_results = None
    if evaluation_test_type:
        test_results = eval_solution(evaluation_test_type=evaluation_test_type, example=problem, prediction=solution, silent=silent)
    # Guard: no test type requested (test_results is None — previously crashed
    # on test_results[1]) or no tests were run.
    if not test_results or not test_results[1]:
        logger.info("No tests were run")
        # Bug fix: the original returned a 3-tuple here while the success path
        # returns a 4-tuple, breaking callers that unpack four values.
        return test_results, 0, 0, 0
    test_passed = 0
    test_failed = 0
    test_timeout = 0
    for test in test_results[1].test_results:
        if test.program_status.name == 'kTimeout':
            test_timeout += 1
        elif not test.passed:
            test_failed += 1
        else:
            test_passed += 1
    logger.info("=====================================")
    logger.info(f"test_passed: {test_passed}, test_failed: {test_failed}, test_timeout: {test_timeout}")
    logger.info("=====================================")
    return test_results, test_passed, test_failed, test_timeout
153,423 | import asyncio
import logging
import os
from jinja2 import Environment, StrictUndefined
from alpha_codium.code_contests.data.provider import CodeContestDataProvider
from alpha_codium.gen.stages.run_baseline import run_baseline
from alpha_codium.gen.stages.run_choose_best_solution import run_choose_best_solution
from alpha_codium.gen.stages.run_evaluate_all_ai_tests import run_evaluate_all_ai_tests
from alpha_codium.gen.stages.run_evaluate_public_tests import run_evaluate_public_tests
from alpha_codium.gen.stages.run_generate_ai_test import run_generate_ai_tests
from alpha_codium.gen.stages.run_generate_possible_solutions import run_generate_possible_solutions
from alpha_codium.gen.stages.run_self_reflect import run_self_reflect
from alpha_codium.gen.stages.run_initial_code_generation import run_initial_code_generation
from alpha_codium.gen.stages.utils import set_configurations
from alpha_codium.gen.utils import evaluate_solution_on_subset
from alpha_codium.llm.ai_handler import AiHandler
from alpha_codium.log import get_logger
from alpha_codium.settings.config_loader import get_settings
def solve_my_problem(problem):
    """Solve one problem with CodeContestsCompetitor, then score the resulting
    code on the public, private, and generated test groups.

    Returns:
        (solution_code, test_results) where test_results comes from the
        generated-tests evaluation (the last one run).
    """
    cwd = os.getcwd()
    log = get_logger(__name__)
    competitor = CodeContestsCompetitor()
    os.chdir(cwd)
    prediction = competitor.solve_problem_in_dataset(problem)
    log.info(f"testing solution on private tests with prediction:\n{prediction}")
    # Evaluate on each test group in turn; `scores[tag]` holds
    # (passed, failed, timed_out) for that group.
    scores = {}
    test_results = None
    for subset, label, tag in (('public_tests', 'public', 'public'),
                               ('private_tests', 'private', 'private'),
                               ('generated_tests', 'generated', 'generate')):
        log.info(f"evaluating solution on {label} tests...")
        test_results, passed, failed, timed_out = evaluate_solution_on_subset(
            subset, problem, prediction, silent=True)
        scores[tag] = (passed, failed, timed_out)
    log.info(f"\ntest_passed_generate: {scores['generate'][0]}, test_passed_private: {scores['private'][0]}, test_passed_public: {scores['public'][0]}"
             f"\ntest_failed_generate: {scores['generate'][1]}, test_failed_private: {scores['private'][1]}, test_failed_public: {scores['public'][1]}"
             f"\ntest_timeout_generate: {scores['generate'][2]}, test_timeout_private: {scores['private'][2]}, test_timeout_public: {scores['public'][2]}")
    return prediction, test_results
class CodeContestDataProvider:
    """Loads a CodeContests-style dataset, registers its splits as DuckDB
    tables, and offers lookup, translation, and filtering helpers.

    NOTE(review): several methods below take `ds` as the first parameter with
    no `self` and look like they should be @staticmethods — calling them as
    bound methods while also passing `ds=` as a keyword would raise TypeError;
    confirm against the original project source.
    """

    def __init__(self, dataset_location, connection=None):
        # Root folder for locally cached datasets (expanded from settings).
        self.private_datasets_root = os.path.expanduser(
            get_settings().config.private_dataset_cache_dir
        )
        (
            self.dataset_location,
            self.dataset_name,
            self.load_from_disk,
        ) = self.parse_location(dataset_location)
        self.dataset = self.load_dataset()
        # Reuse a caller-supplied DuckDB connection or open an in-memory one.
        self.connection = connection or duckdb.connect()
        self.connect(self.dataset)

    def find_problem(ds, problem_name, split_name=None, evaluation_test_type = None):
        """Return one problem dict from `ds`.

        With no `problem_name`: the first problem (optionally the first that
        has inputs for `evaluation_test_type`). Otherwise the problem whose
        'name' matches, or raise ValueError.
        NOTE(review): no `self` parameter — see class docstring.
        """
        if split_name:
            ds = ds[split_name]
        example = None
        if not problem_name:
            for e in ds:
                if evaluation_test_type:
                    tests = e.get(evaluation_test_type)
                    if tests and tests.get("input"):
                        example = e
                        break
                else:
                    example = e
                    break
        else:
            problems = ds.filter(lambda example: example['name'] == problem_name)
            if problems:
                example = problems[0]
            else:
                raise ValueError(
                    f"problem with name {problem_name} doesn't exist in dataset {ds.info.dataset_name} in split {split_name}")
        return example

    def prepare_for_evaluation(
        predictions, source_of_truth, evaluation_test_type
    ):
        """Join predictions with the ground-truth tests of
        `evaluation_test_type` into a Dataset with 'predictions' and
        'references' columns.
        NOTE(review): no `self` parameter — see class docstring.
        """
        preds = predictions
        sot = source_of_truth
        sot = sot.select_columns(["name", evaluation_test_type])
        sot = sot.rename_column("name", "task_name")
        sot = sot.flatten()
        sot = sot.rename_column(f"{evaluation_test_type}.input", "tests_inputs")
        sot = sot.rename_column(f"{evaluation_test_type}.output", "tests_outputs")
        joined = sot.to_pandas().merge(preds.to_pandas(), on="task_name", how="left")
        joined["predictions"] = joined[["task_name", "solution_candidates"]].to_dict(
            "records"
        )
        joined["references"] = joined[["tests_inputs", "tests_outputs"]].to_dict(
            "records"
        )
        # Retain only the 'predictions' and 'references' columns
        joined = joined[["predictions", "references"]]
        restructured_dataset = Dataset.from_pandas(joined)
        return restructured_dataset

    def parse_location(self, dataset_location):
        """Split a dataset location into (path, name, load_from_disk).

        NOTE(review): `load_from_disk` is unconditionally True here, so the
        hub branch in load_dataset() is currently unreachable — confirm
        intended.
        """
        result_location = dataset_location
        dataset_name = dataset_location.split(os.path.sep)[-1]
        load_from_disk = True
        if load_from_disk:
            # Resolve relative paths against the private dataset cache.
            if not result_location.startswith(os.path.sep):
                result_location = os.path.join(
                    self.private_datasets_root, result_location
                )
        return result_location, dataset_name, load_from_disk

    def prepare_code_contest_split_for_eval(
        ds, evaluation_test_type="public_tests", task_name_column="name",
        path_to_solutions_column="solutions.solution"
    ):
        """Reshape a raw CodeContests split into the evaluation layout.
        NOTE(review): no `self` parameter — see class docstring.
        """
        solutions = ds.flatten()
        solutions = solutions.rename_column(
            path_to_solutions_column, "solution_candidates"
        )
        solutions = solutions.rename_column(task_name_column, "task_name")
        solutions = solutions.select_columns(["task_name", "solution_candidates"])
        return CodeContestDataProvider.prepare_for_evaluation(
            predictions=solutions,
            source_of_truth=ds,
            evaluation_test_type=evaluation_test_type,
        )

    def show(self, ds, paths_to_python, paths_to_free_text):
        """Pretty-print code columns (via black) and unescape free-text columns.

        NOTE(review): `ds.flatte()` looks like a typo for `ds.flatten()`, and
        `format_example` returns None, which `datasets.map` does not accept —
        verify against the original source.
        """
        result = ds.flatte()

        def format_example(example):
            for code_col in paths_to_python:
                import black
                example[code_col] = black.format_str(example[code_col])
            for col in paths_to_free_text:
                example[col] = example[col].replace("\\n", "\n")

        pretty = result.map(format_example)
        return pretty

    def load_dataset(self):
        """Load the dataset from disk (or, nominally, from the hub).

        NOTE(review): `load_from_disk` / `load_dataset` on the right-hand side
        refer to module-level functions (presumably from `datasets`), not to
        this method or the instance attribute — confirm the imports exist at
        the top of the file.
        """
        if self.load_from_disk:
            f = load_from_disk
        else:
            f = load_dataset
        return f(self.dataset_location)

    def connect(self, ds):
        """Register every split (or the single dataset) as a DuckDB table."""
        if hasattr(ds, "keys"):
            for split in self.dataset.keys():
                split_ds = self.dataset[split]
                table = split_ds.data.table
                self.connection.register(f"{split_ds.info.dataset_name}_{split}", table)
        else:
            self.connection.register(f"{ds.info.dataset_name}", ds.data.table)

    def get_splits(self):
        # Split names of the underlying DatasetDict.
        return self.dataset.keys()

    def sample(ds, fraction=0.1):
        """Return a random `fraction` of `ds`, sampled without replacement.
        NOTE(review): no `self` parameter — see class docstring.
        """
        table = ds
        sample_size = int(len(table) * fraction)
        indices = np.random.choice(len(table), sample_size, replace=False)
        sampled_table = table.select(indices)
        return sampled_table

    def query(self, query_string) -> pd.DataFrame:
        """Run a SQL query against the registered tables; returns a DataFrame."""
        return self.connection.query(query_string).df()

    def translate_references(self, ds):
        """Replace integer class-label columns with their string names, for
        both scalar problem columns and per-solution language sequences.

        NOTE(review): `problem_translations` / `solution_translations` are
        module-level globals not visible here; and the final `result[ds]`
        looks like it should be `result["ds"]` (the key used above) — verify.
        """
        expand = False
        if not isinstance(ds, DatasetDict):
            # Wrap a bare Dataset so the loop below handles both shapes.
            to_translate = {"ds": ds}
            expand = True
        else:
            to_translate = ds
        for ds_name, ds_val in to_translate.items():
            for col in problem_translations:
                translated_col = ds_val.features[col].int2str(ds_val[col])
                ds_val = ds_val.remove_columns([col])
                ds_val = ds_val.add_column(col, translated_col)

            def translate_sequence_references(example, ds):
                # Map each solution's integer language id to its string name.
                for col in solution_translations:
                    translator = ds.features[col].feature["language"]
                    arr = example[col]["language"]
                    translated_solution = [translator.int2str(item) for item in arr]
                    example[col]["language"] = translated_solution
                return example

            new_features = ds_val.features.copy()
            for col in solution_translations:
                new_features[col] = Sequence(
                    feature={"language": Value("string"), "solution": Value("string")}
                )
            ds_val = ds_val.map(
                lambda example, ds=ds_val: translate_sequence_references(
                    example=example, ds=ds
                ),
                features=new_features,
            )
            to_translate[ds_name] = ds_val
        result = to_translate
        if expand:
            result = result[ds]
        return result

    def filter_solution_by_languages(self, ds, languages: Iterable[str], keep=True):
        """Keep (or drop, when keep=False) solutions written in `languages`."""
        languages_set = set(languages)

        def filter_solutions_by_languages(example):
            for sol_col in solution_translations:
                langs = example[sol_col]["language"]
                sols = example[sol_col]["solution"]
                filtered_languages = [
                    lang for lang in langs if (lang in languages_set) == keep
                ]
                filtered_solutions = [
                    s
                    for idx, s in enumerate(sols)
                    if (langs[idx] in languages_set) == keep
                ]
                example[sol_col] = {
                    "language": filtered_languages,
                    "solution": filtered_solutions,
                }
            return example

        ds = ds.map(filter_solutions_by_languages)
        return ds
def evaluate_solution_on_subset(evaluation_test_type, problem, solution, silent=False, break_on_timeout=True):
    """Evaluate `solution` on `problem[evaluation_test_type]` and tally
    per-test outcomes.

    Returns:
        (test_results, test_passed, test_failed, test_timeout).
    """
    test_results = None
    if evaluation_test_type:
        test_results = eval_solution(evaluation_test_type=evaluation_test_type, example=problem, prediction=solution,
                                     silent=silent, break_on_timeout=break_on_timeout)
    # Guard: also covers a falsy evaluation_test_type (test_results is None),
    # which previously raised TypeError on test_results[1].
    if test_results is None or test_results[1] == []:
        if not silent:
            logger.info("=====================================")
            logger.info("No tests")
            logger.info("=====================================")
        return test_results, 0, 0, 0
    # A timeout at the compilation stage counts as every test timing out.
    if (hasattr(test_results[1], 'compilation_result') and
            test_results[1].compilation_result.program_status.name == 'kTimeout'):
        if not silent:
            logger.info("=====================================")
            logger.info("Timeout")
            logger.info("=====================================")
        return test_results, 0, 0, len(test_results[0])
    test_passed = 0
    test_failed = 0
    test_timeout = 0
    if not problem[evaluation_test_type]['input']:
        logger.info(f"No {evaluation_test_type} for this problem")
    else:
        for test in test_results[1].test_results:
            if hasattr(test, 'program_status') and test.program_status.name == 'kTimeout':
                test_timeout += 1
            elif not test.passed:
                test_failed += 1
            else:
                test_passed += 1
    if not silent:
        logger.info("=====================================")
        logger.info(f"test_passed: {test_passed}, test_failed: {test_failed}, test_timeout: {test_timeout}")
        logger.info("=====================================")
    return test_results, test_passed, test_failed, test_timeout
def get_logger(*args, **kwargs):
    # Shim: always return the module-level logger, ignoring the requested name.
    return logger
def get_settings():
    # Shim: return the process-wide settings singleton.
    # NOTE(review): `global_settings` is defined elsewhere in the module —
    # confirm it is in scope at call time.
    return global_settings
def solve_problem(dataset_name,
                  split_name="valid",
                  problem_name="",
                  problem_number=0):
    """Locate one problem in a dataset split (by name or index), optionally
    sanity-check its published solutions, then solve it with AlphaCodium.

    Returns:
        Whatever solve_my_problem returns: (solution_code, test_results).
    """
    # load dataset
    logger = get_logger(__name__)
    data_provider = CodeContestDataProvider(dataset_location=dataset_name)
    if problem_number and problem_name:
        logger.info(f"problem_number and problem_name are both specified, using problem_name")
    if not problem_name and problem_number:
        # Resolve the numeric index into the problem's name.
        problem_name = data_provider.dataset[split_name][int(problem_number)]['name']
        logger.info(f"problem_name: {problem_name}")
    # find problem
    problem = data_provider.find_problem(ds=data_provider.dataset, problem_name=problem_name, split_name=split_name)
    logger.info(f"problem['name']: {problem['name']}")
    # # check if problem is valid (at least one of the provided solutions actually passes the generated tests)
    # if not problem.get('is_valid_problem', True):
    #     logger.info(f"problem['is_valid_problem'] == False, skipping")
    #     return None, None
    # evaluate prev solutions
    evaluate_prev_solutions = get_settings().get("dataset.evaluate_prev_solutions", False)
    if evaluate_prev_solutions:
        try:
            if not problem['solutions']['solution']:
                logger.info("No public solutions for this problem")
            found_solution = False
            for index_published, sol_published in enumerate(problem['solutions']['solution']):
                # Only Python reference solutions can be executed by the tester.
                # NOTE(review): setting found_solution=True while *skipping* a
                # non-Python solution suppresses the "none passed" message
                # below — looks suspicious; verify intent.
                if 'python' not in problem['solutions']['language'][index_published].lower():
                    found_solution = True
                    continue
                logger.info(f"evaluating public solution {index_published} on private tests...")
                test_results, test_passed_private, test_failed_private, test_timeout_private \
                    = evaluate_solution_on_subset('private_tests', problem, sol_published, silent=True)
                logger.info(f"evaluating public solution {index_published} on generated tests...")
                test_results, test_passed_generate, test_failed_generate, test_timeout_generate = (
                    evaluate_solution_on_subset('generated_tests', problem, sol_published, silent=True))
                # "Passes" means: nothing failed or timed out, and at least one
                # test actually ran.
                if (test_failed_private == test_failed_generate == test_timeout_private == test_timeout_generate == 0) \
                        and test_passed_private + test_passed_generate > 0:
                    logger.info(f"sol_published index {index_published} passed all tests:\n{sol_published}")
                    found_solution = True
                    break
            if not found_solution:
                logger.info(f"None of the public solutions passed all tests")
        except Exception as e:
            # Best-effort sanity check; never block solving on it.
            logger.error(f"Error evaluating public solutions: {e}")
            pass
    return solve_my_problem(problem)
153,424 | import json
import os
import shutil
from collections import OrderedDict
from alpha_codium.code_contests.data.provider import CodeContestDataProvider
from alpha_codium.gen.coding_competitor import CodeContestsCompetitor
from alpha_codium.gen.utils import evaluate_solution_on_subset
from alpha_codium.log import setup_logger, get_logger
from alpha_codium.settings.config_loader import get_settings
def is_solved(s):
    """Return True when an iteration record shows a fully solved problem:
    zero failures and zero timeouts on both the private and generated test
    groups, and at least one passing test overall.
    """
    no_failures = (s['test_failed_private'] == 0 and s['test_failed_generate'] == 0 and
                   s['test_timeout_private'] == 0 and s['test_timeout_generate'] == 0)
    any_passed = (s['test_passed_private'] + s['test_passed_generate']) > 0
    return no_failures and any_passed
class CodeContestDataProvider:
    """Loads a CodeContests-style dataset, registers its splits as DuckDB
    tables, and offers lookup, translation, and filtering helpers.

    NOTE(review): several methods below take `ds` as the first parameter with
    no `self` and look like they should be @staticmethods — calling them as
    bound methods while also passing `ds=` as a keyword would raise TypeError;
    confirm against the original project source.
    """

    def __init__(self, dataset_location, connection=None):
        # Root folder for locally cached datasets (expanded from settings).
        self.private_datasets_root = os.path.expanduser(
            get_settings().config.private_dataset_cache_dir
        )
        (
            self.dataset_location,
            self.dataset_name,
            self.load_from_disk,
        ) = self.parse_location(dataset_location)
        self.dataset = self.load_dataset()
        # Reuse a caller-supplied DuckDB connection or open an in-memory one.
        self.connection = connection or duckdb.connect()
        self.connect(self.dataset)

    def find_problem(ds, problem_name, split_name=None, evaluation_test_type = None):
        """Return one problem dict from `ds`.

        With no `problem_name`: the first problem (optionally the first that
        has inputs for `evaluation_test_type`). Otherwise the problem whose
        'name' matches, or raise ValueError.
        NOTE(review): no `self` parameter — see class docstring.
        """
        if split_name:
            ds = ds[split_name]
        example = None
        if not problem_name:
            for e in ds:
                if evaluation_test_type:
                    tests = e.get(evaluation_test_type)
                    if tests and tests.get("input"):
                        example = e
                        break
                else:
                    example = e
                    break
        else:
            problems = ds.filter(lambda example: example['name'] == problem_name)
            if problems:
                example = problems[0]
            else:
                raise ValueError(
                    f"problem with name {problem_name} doesn't exist in dataset {ds.info.dataset_name} in split {split_name}")
        return example

    def prepare_for_evaluation(
        predictions, source_of_truth, evaluation_test_type
    ):
        """Join predictions with the ground-truth tests of
        `evaluation_test_type` into a Dataset with 'predictions' and
        'references' columns.
        NOTE(review): no `self` parameter — see class docstring.
        """
        preds = predictions
        sot = source_of_truth
        sot = sot.select_columns(["name", evaluation_test_type])
        sot = sot.rename_column("name", "task_name")
        sot = sot.flatten()
        sot = sot.rename_column(f"{evaluation_test_type}.input", "tests_inputs")
        sot = sot.rename_column(f"{evaluation_test_type}.output", "tests_outputs")
        joined = sot.to_pandas().merge(preds.to_pandas(), on="task_name", how="left")
        joined["predictions"] = joined[["task_name", "solution_candidates"]].to_dict(
            "records"
        )
        joined["references"] = joined[["tests_inputs", "tests_outputs"]].to_dict(
            "records"
        )
        # Retain only the 'predictions' and 'references' columns
        joined = joined[["predictions", "references"]]
        restructured_dataset = Dataset.from_pandas(joined)
        return restructured_dataset

    def parse_location(self, dataset_location):
        """Split a dataset location into (path, name, load_from_disk).

        NOTE(review): `load_from_disk` is unconditionally True here, so the
        hub branch in load_dataset() is currently unreachable — confirm
        intended.
        """
        result_location = dataset_location
        dataset_name = dataset_location.split(os.path.sep)[-1]
        load_from_disk = True
        if load_from_disk:
            # Resolve relative paths against the private dataset cache.
            if not result_location.startswith(os.path.sep):
                result_location = os.path.join(
                    self.private_datasets_root, result_location
                )
        return result_location, dataset_name, load_from_disk

    def prepare_code_contest_split_for_eval(
        ds, evaluation_test_type="public_tests", task_name_column="name",
        path_to_solutions_column="solutions.solution"
    ):
        """Reshape a raw CodeContests split into the evaluation layout.
        NOTE(review): no `self` parameter — see class docstring.
        """
        solutions = ds.flatten()
        solutions = solutions.rename_column(
            path_to_solutions_column, "solution_candidates"
        )
        solutions = solutions.rename_column(task_name_column, "task_name")
        solutions = solutions.select_columns(["task_name", "solution_candidates"])
        return CodeContestDataProvider.prepare_for_evaluation(
            predictions=solutions,
            source_of_truth=ds,
            evaluation_test_type=evaluation_test_type,
        )

    def show(self, ds, paths_to_python, paths_to_free_text):
        """Pretty-print code columns (via black) and unescape free-text columns.

        NOTE(review): `ds.flatte()` looks like a typo for `ds.flatten()`, and
        `format_example` returns None, which `datasets.map` does not accept —
        verify against the original source.
        """
        result = ds.flatte()

        def format_example(example):
            for code_col in paths_to_python:
                import black
                example[code_col] = black.format_str(example[code_col])
            for col in paths_to_free_text:
                example[col] = example[col].replace("\\n", "\n")

        pretty = result.map(format_example)
        return pretty

    def load_dataset(self):
        """Load the dataset from disk (or, nominally, from the hub).

        NOTE(review): `load_from_disk` / `load_dataset` on the right-hand side
        refer to module-level functions (presumably from `datasets`), not to
        this method or the instance attribute — confirm the imports exist at
        the top of the file.
        """
        if self.load_from_disk:
            f = load_from_disk
        else:
            f = load_dataset
        return f(self.dataset_location)

    def connect(self, ds):
        """Register every split (or the single dataset) as a DuckDB table."""
        if hasattr(ds, "keys"):
            for split in self.dataset.keys():
                split_ds = self.dataset[split]
                table = split_ds.data.table
                self.connection.register(f"{split_ds.info.dataset_name}_{split}", table)
        else:
            self.connection.register(f"{ds.info.dataset_name}", ds.data.table)

    def get_splits(self):
        # Split names of the underlying DatasetDict.
        return self.dataset.keys()

    def sample(ds, fraction=0.1):
        """Return a random `fraction` of `ds`, sampled without replacement.
        NOTE(review): no `self` parameter — see class docstring.
        """
        table = ds
        sample_size = int(len(table) * fraction)
        indices = np.random.choice(len(table), sample_size, replace=False)
        sampled_table = table.select(indices)
        return sampled_table

    def query(self, query_string) -> pd.DataFrame:
        """Run a SQL query against the registered tables; returns a DataFrame."""
        return self.connection.query(query_string).df()

    def translate_references(self, ds):
        """Replace integer class-label columns with their string names, for
        both scalar problem columns and per-solution language sequences.

        NOTE(review): `problem_translations` / `solution_translations` are
        module-level globals not visible here; and the final `result[ds]`
        looks like it should be `result["ds"]` (the key used above) — verify.
        """
        expand = False
        if not isinstance(ds, DatasetDict):
            # Wrap a bare Dataset so the loop below handles both shapes.
            to_translate = {"ds": ds}
            expand = True
        else:
            to_translate = ds
        for ds_name, ds_val in to_translate.items():
            for col in problem_translations:
                translated_col = ds_val.features[col].int2str(ds_val[col])
                ds_val = ds_val.remove_columns([col])
                ds_val = ds_val.add_column(col, translated_col)

            def translate_sequence_references(example, ds):
                # Map each solution's integer language id to its string name.
                for col in solution_translations:
                    translator = ds.features[col].feature["language"]
                    arr = example[col]["language"]
                    translated_solution = [translator.int2str(item) for item in arr]
                    example[col]["language"] = translated_solution
                return example

            new_features = ds_val.features.copy()
            for col in solution_translations:
                new_features[col] = Sequence(
                    feature={"language": Value("string"), "solution": Value("string")}
                )
            ds_val = ds_val.map(
                lambda example, ds=ds_val: translate_sequence_references(
                    example=example, ds=ds
                ),
                features=new_features,
            )
            to_translate[ds_name] = ds_val
        result = to_translate
        if expand:
            result = result[ds]
        return result

    def filter_solution_by_languages(self, ds, languages: Iterable[str], keep=True):
        """Keep (or drop, when keep=False) solutions written in `languages`."""
        languages_set = set(languages)

        def filter_solutions_by_languages(example):
            for sol_col in solution_translations:
                langs = example[sol_col]["language"]
                sols = example[sol_col]["solution"]
                filtered_languages = [
                    lang for lang in langs if (lang in languages_set) == keep
                ]
                filtered_solutions = [
                    s
                    for idx, s in enumerate(sols)
                    if (langs[idx] in languages_set) == keep
                ]
                example[sol_col] = {
                    "language": filtered_languages,
                    "solution": filtered_solutions,
                }
            return example

        ds = ds.map(filter_solutions_by_languages)
        return ds
class CodeContestsCompetitor:
    """Drives the staged AlphaCodium flow (self-reflection, solution
    generation, test generation, iterative code fixing) for one problem,
    using prompt templates taken from the settings."""

    def __init__(self):
        # Collect every settings section whose name mentions
        # 'code_contests_prompt'; keys are stored lowercased.
        self.prompt = {}
        for section in get_settings():  # renamed from `set`, which shadowed the builtin
            if 'code_contests_prompt' in section.lower():
                self.prompt[section.lower()] = get_settings()[section]
        self.ai_handler = AiHandler()

    def render(self, problem_json, prompt: str):
        """Render the system/user Jinja templates of `prompt` with the problem
        fields.

        Returns:
            (sys_prompt, usr_prompt, temperature, frequency_penalty);
            temperature defaults to 0.2 and frequency_penalty to None when the
            prompt does not define them.
        """
        environment = Environment(undefined=StrictUndefined)
        environment.globals["zip"] = zip
        environment.globals["enumerate"] = enumerate
        sys_prompt = environment.from_string(self.prompt[prompt].system).render(problem_json)
        usr_prompt = environment.from_string(self.prompt[prompt].user).render(problem_json)
        # Per-prompt overrides with defaults.
        temperature = self.prompt[prompt].temperature if hasattr(self.prompt[prompt], 'temperature') else 0.2
        frequency_penalty = self.prompt[prompt].frequency_penalty if hasattr(self.prompt[prompt], 'frequency_penalty') else None
        return sys_prompt, usr_prompt, temperature, frequency_penalty

    async def _run(self, model, problem, prompt: str = "code_contests_prompt_reflect"):
        """Send one rendered prompt to the model; returns (response, finish_reason)."""
        system_prompt, user_prompt, temperature, frequency_penalty = self.render(problem, prompt)
        if frequency_penalty is None:  # idiom fix: was `== None`
            frequency_penalty = get_settings().get("config.frequency_penalty")
        response, finish_reason = await self.ai_handler.chat_completion(
            model=model, system=system_prompt, user=user_prompt,
            temperature=temperature, frequency_penalty=frequency_penalty,
        )
        return response, finish_reason

    async def run(self, problem, iteration=0, logger_ext=None):
        """Run either the baseline or the full staged flow on `problem`.

        Returns the final candidate code, or '' if any stage raised.
        """
        logger = logger_ext if logger_ext else get_logger(__name__)
        logger.info(f"Running code contests competitor, model {get_settings().config['model']}")
        try:
            if get_settings().get("solve.use_baseline", False):
                problem['code_recent_solution'] = await run_baseline(self, problem)
            else:
                # configurations
                problem = set_configurations(problem, iteration)
                # self-reflect
                problem = await run_self_reflect(self, problem)
                # generate solutions
                problem = await run_generate_possible_solutions(self, problem)
                # choose best solution
                problem = await run_choose_best_solution(self, problem)
                # generate ai tests
                problem = await run_generate_ai_tests(self, problem)
                # initial code generation
                problem = await run_initial_code_generation(self, problem)
                # evaluate on public tests
                problem = await run_evaluate_public_tests(self, problem)
                # evaluate on ai tests
                problem = await run_evaluate_all_ai_tests(self, problem)
            return problem['code_recent_solution']
        except Exception as e:
            logging.error(f"Error: {e}")
            return ""

    def solve_problem_in_dataset(self, example, iteration=0, logger_ext=None):
        """Project `example` onto the fields the flow needs and solve it
        synchronously via asyncio.run."""
        problem = {k: example.get(k) for k in ["name", "description", 'public_tests']}
        prediction = asyncio.run(self.run(problem=problem, iteration=iteration, logger_ext=logger_ext))
        return prediction
def evaluate_solution_on_subset(evaluation_test_type, problem, solution, silent=False, break_on_timeout=True):
    """Evaluate `solution` on `problem[evaluation_test_type]` and tally
    per-test outcomes.

    Returns:
        (test_results, test_passed, test_failed, test_timeout).
    """
    test_results = None
    if evaluation_test_type:
        test_results = eval_solution(evaluation_test_type=evaluation_test_type, example=problem, prediction=solution,
                                     silent=silent, break_on_timeout=break_on_timeout)
    # Guard: also covers a falsy evaluation_test_type (test_results is None),
    # which previously raised TypeError on test_results[1].
    if test_results is None or test_results[1] == []:
        if not silent:
            logger.info("=====================================")
            logger.info("No tests")
            logger.info("=====================================")
        return test_results, 0, 0, 0
    # A timeout at the compilation stage counts as every test timing out.
    if (hasattr(test_results[1], 'compilation_result') and
            test_results[1].compilation_result.program_status.name == 'kTimeout'):
        if not silent:
            logger.info("=====================================")
            logger.info("Timeout")
            logger.info("=====================================")
        return test_results, 0, 0, len(test_results[0])
    test_passed = 0
    test_failed = 0
    test_timeout = 0
    if not problem[evaluation_test_type]['input']:
        logger.info(f"No {evaluation_test_type} for this problem")
    else:
        for test in test_results[1].test_results:
            if hasattr(test, 'program_status') and test.program_status.name == 'kTimeout':
                test_timeout += 1
            elif not test.passed:
                test_failed += 1
            else:
                test_passed += 1
    if not silent:
        logger.info("=====================================")
        logger.info(f"test_passed: {test_passed}, test_failed: {test_failed}, test_timeout: {test_timeout}")
        logger.info("=====================================")
    return test_results, test_passed, test_failed, test_timeout
def setup_logger(logger_path: str = "./example.log",
                 level: str = "INFO",
                 fmt: LoggingFormat = LoggingFormat.CONSOLE):
    """Configure the shared logger: stdout sink (JSON or colorized console)
    at the requested level, plus a DEBUG-level file sink at `logger_path`
    (truncated on each call).

    Returns the configured logger.
    """
    # getLevelName returns a string like 'Level FOO' for unknown names;
    # fall back to INFO then. (Was `type(level) is not int`; also avoid
    # rebinding the `level` parameter to a different type.)
    resolved_level = logging.getLevelName(level.upper())
    if not isinstance(resolved_level, int):
        resolved_level = logging.INFO
    fileHandler = logging.FileHandler(logger_path, mode='w')
    if fmt == LoggingFormat.JSON:
        logger.remove(None)
        logger.add(
            sys.stdout,
            level=resolved_level,
            format="{message}",
            colorize=False,
            serialize=True,
        )
    elif fmt == LoggingFormat.CONSOLE:
        logger.remove(None)
        logger.add(sys.stdout, level=resolved_level, colorize=True)
    # File sink always captures everything at DEBUG.
    logger.add(fileHandler, level=logging.DEBUG)
    return logger
def get_settings():
    # Shim: return the process-wide settings singleton.
    # NOTE(review): `global_settings` is defined elsewhere in the module —
    # confirm it is in scope at call time.
    return global_settings
def solve_dataset(dataset_name='valid_and_test_processed',
                  split_name='valid',
                  database_solution_path='solution_database.json'):
    """Solve every problem in a dataset split, checkpointing results to JSON.

    Resumes from `database_solution_path` when it exists: problems (or
    individual iterations) already recorded there are skipped.
    """
    # load dataset
    data_provider = CodeContestDataProvider(dataset_location=dataset_name)
    setting = get_settings()
    num_problems = len(data_provider.dataset[split_name])
    base_path = os.getcwd()
    setting.solve.reduce_verbose = True

    # load previous solution-database if it exists; keys are strings after the
    # JSON round-trip, hence the int(...) sort key.
    try:
        with open(database_solution_path, 'r') as f:
            database = json.load(f)
        database[split_name] = OrderedDict(sorted(database[split_name].items(), key=lambda x: int(x[0])))
    except Exception:  # was a bare `except:`, which also swallowed SystemExit/KeyboardInterrupt
        print(f"Failed to load database from {database_solution_path}")
        database = {split_name: {}}

    # iterate on problems
    for problem_number in range(0, num_problems):
        # skip if already ran
        logger = setup_logger()
        num_iterations = setting.get("dataset.num_iterations", 1)
        prev = database[split_name].get(str(problem_number), {}).get(f'iteration_{num_iterations-1}', {})
        if not ((prev == {}) or (prev is None)):
            print(f"problem_number {problem_number} already ran")
            continue
        # check if problem is valid (at least one of the provided solutions actually passes the generated tests)
        if data_provider.dataset[split_name][problem_number].get('is_valid_problem', True) is False:
            logger.info(f"problem {problem_number} is not valid")
            continue
        os.chdir(base_path)
        logger.info(f"problem_number: {problem_number}")
        problem_name = data_provider.dataset[split_name][int(problem_number)]['name']
        logger.info(f"problem_name: {problem_name}")
        problem = data_provider.find_problem(ds=data_provider.dataset, problem_name=problem_name, split_name=split_name)
        logger.info(f"problem['cf_tags']: {problem['cf_tags']}")

        # solve problem, up to num_iterations attempts
        problem_database = {problem_number: {}}
        solver = CodeContestsCompetitor()
        for iteration in range(setting.get("dataset.num_iterations", 1)):
            it_str = f"iteration_{iteration}"
            problem_database[problem_number][it_str] = {}

            # skip if this iteration already ran
            prev_iter = database[split_name].get(str(problem_number), {}).get(it_str, {})
            if not ((prev_iter == {}) or (prev_iter is None)):
                print(f"prev_iter {iteration} already ran")
                problem_database[problem_number][it_str] = prev_iter
                if is_solved(prev_iter):
                    logger.info(f"codium solved problem {problem_number} in iteration {iteration}")
                    break
                continue

            # solve problem
            solution = solver.solve_problem_in_dataset(problem, iteration, logger)
            logger.info(f"solution code:\n{solution}")
            if not solution:
                logger.info(f"Failed to solve problem {problem_number} in iteration {iteration}")
                continue

            # score the solution on all three test groups
            logger.info(f"Evaluating solution on public tests...")
            test_results, test_passed_public, test_failed_public, test_timeout_public = evaluate_solution_on_subset(
                'public_tests', problem, solution, silent=True)
            logger.info(f"evaluating solution on private tests...")
            test_results, test_passed_private, test_failed_private, test_timeout_private = evaluate_solution_on_subset(
                'private_tests', problem, solution, silent=True)
            logger.info(f"evaluating solution on generated tests...")
            test_results, test_passed_generate, test_failed_generate, test_timeout_generate = evaluate_solution_on_subset(
                'generated_tests', problem, solution, silent=True)
            logger.info(
                f"\ntest_passed_public: {test_passed_public}, test_failed_public: {test_failed_public}, test_timeout_public: {test_timeout_public}\n"
                f"test_passed_private: {test_passed_private}, test_failed_private: {test_failed_private}, test_timeout_private: {test_timeout_private}\n"
                f"test_passed_generate: {test_passed_generate}, test_failed_generate: {test_failed_generate}, test_timeout_generate: {test_timeout_generate}\n")

            record = problem_database[problem_number][it_str]
            record['solution'] = solution
            record['test_passed_private'] = test_passed_private
            record['test_failed_private'] = test_failed_private
            record['test_timeout_private'] = test_timeout_private
            record['test_passed_generate'] = test_passed_generate
            record['test_failed_generate'] = test_failed_generate
            record['test_timeout_generate'] = test_timeout_generate
            record['test_passed_public'] = test_passed_public
            record['test_failed_public'] = test_failed_public
            record['test_timeout_public'] = test_timeout_public
            os.chdir(base_path)
            if is_solved(record):
                logger.info(f"codium solved problem {problem_number} in iteration {iteration}")
                break
            else:
                logger.info(f"codium failed to solve problem {problem_number} in iteration {iteration}")

        # checkpoint after every problem.
        # Bug fix: key with str(problem_number) so in-memory keys match the
        # string keys produced by the JSON round-trip; int keys previously
        # coexisted with the loaded str keys and json.dump then emitted
        # duplicate "N" entries in the output file.
        database[split_name][str(problem_number)] = problem_database[problem_number]
        os.chdir(base_path)
        with open(database_solution_path, 'w') as f:
            json.dump(database, f)
153,425 | import copy
import logging
from alpha_codium.settings.config_loader import get_settings
from alpha_codium.gen.stages.run_initial_solve import run_initial_solve
from alpha_codium.gen.stages.run_tests import run_tests
from alpha_codium.log import get_logger
logger = get_logger(__name__)
def get_settings():
async def run_initial_solve(self, problem):
def run_tests(self, problem, counter, test_inputs, test_outputs):
async def run_initial_code_generation(self, problem):
    """Generate an initial code solution and iterate until the public tests pass.

    Re-solves (rotating through the ranked candidate solutions after the first
    retry) and re-runs the public tests up to `max_attempts` times, remembering
    the attempt with the smallest test distance `d_tot` as a fallback.  If no
    attempt passes, the best-scoring solution seen is restored into
    `problem['code_recent_solution']`.  The whole stage is retried up to 3
    times on unexpected exceptions, then the last exception is re-raised.
    """
    counter_retry = 0
    while True:
        try:
            logger.info("--run initial code generation stage--")
            max_attempts = get_settings().get('initial_code_generation.max_attempts', 5)
            counter = 0
            # set the public tests as input
            test_input = problem['public_tests']['input']
            test_output = problem['public_tests']['output']
            # generate an initial code, using the top solution from the previous stage
            problem = await run_initial_solve(self, problem)
            # run the solution on the selected tests
            problem, passed_tests, non_empty_output, error_str, trace_str, tests_timeout, d_tot \
                = run_tests(self, problem, counter, test_input, test_output)
            best_solution = copy.deepcopy(problem['code_recent_solution'])
            best_d = float('inf')  # distance to the correct solution
            # set the distance to the correct solution (d_tot == -1 means "no distance available")
            if -1 < d_tot < best_d:
                best_solution = copy.deepcopy(problem['code_recent_solution'])
                best_d = d_tot
            while not passed_tests:
                counter += 1
                if counter > max_attempts:
                    logger.error(f"Failed to pass tests after {counter - 1} attempts. exiting the stage")
                    break
                s_best_solution_original = problem['s_best_solution']
                if counter > 1 and 's_possible_solutions' in problem:
                    # give two attempts to the highest ranked solution, then
                    # cycle through the other candidates via the modulo index
                    problem['s_best_solution'] = problem['s_possible_solutions'][
                        counter % len(problem['s_possible_solutions'])]
                problem = await run_initial_solve(self, problem)
                # restore the original best solution so the rotation above stays temporary
                problem['s_best_solution'] = s_best_solution_original
                problem, passed_tests, non_empty_output, error_str, trace_str, tests_timeout, d_tot \
                    = run_tests(self, problem, counter, test_input, test_output)
                if passed_tests:
                    logger.info(f"Passed tests after {counter} attempts")
                    break
                else:
                    logger.info(f"Failed to pass tests after {counter} attempts, d: {d_tot}, best_d so far: {best_d}")
                    # save the best solution so far
                    if -1 < d_tot < best_d:
                        best_solution = copy.deepcopy(problem['code_recent_solution'])
                        best_d = d_tot
            # set the best solution seen when no attempt fully passed
            if not passed_tests and best_d < float('inf'):
                logger.error(f'Reverting to best solution so far, d_tot: {best_d}')
                problem['code_recent_solution'] = best_solution
            return problem
        except Exception as e:
            logging.error(f"'initial code generation' stage, counter_retry {counter_retry}, Error: {e}")
            counter_retry += 1
            if counter_retry > 2:
                raise e
153,426 | import functools
import logging
from alpha_codium.gen.utils import postprocess_response
from alpha_codium.llm.ai_invoker import send_inference
from alpha_codium.log import get_logger
def postprocess_response(response):
    """Normalise a raw model reply.

    Coerces the reply to `str`, drops a trailing "stop" marker, and — when a
    fenced ``` code block is present — returns the body of the first fence
    instead of the full text.
    """
    text = str(response)
    if text.endswith("stop"):
        text = text[:-len("stop")]
    # prefer the contents of the first fenced code block, if any
    fenced = re.findall(r'```\w*\n(.*?)```', text, re.DOTALL)
    return fenced[0] if fenced else text
async def send_inference(f: Callable):
    """Invoke `f(model)` against each configured (model, deployment) pair in turn.

    Returns the first successful result; a failing pair is logged (with the
    traceback) and the next one is tried.  Only after the last pair fails is
    the exception re-raised to the caller.
    """
    all_models = _get_all_models()
    all_deployments = _get_all_deployments(all_models)
    # try each (model, deployment_id) pair until one is successful, otherwise raise exception
    for i, (model, deployment_id) in enumerate(zip(all_models, all_deployments)):
        try:
            get_settings().set("openai.deployment_id", deployment_id)
            return await f(model)
        except Exception:
            logging.warning(
                f"Failed to generate prediction with {model}"
                f"{(' from deployment ' + deployment_id) if deployment_id else ''}: "
                f"{traceback.format_exc()}"
            )
            if i == len(all_models) - 1:  # If it's the last iteration
                raise  # Re-raise the last exception
async def run_baseline(self, problem):
    """Solve `problem` with a single direct (baseline) prompt — no iteration.

    Returns the post-processed code string from the model.
    NOTE(review): on any failure this calls `exit(-1)`, terminating the host
    process instead of propagating the error — confirm this is intended.
    """
    try:
        logging.info("Using baseline prompt")
        f = functools.partial(self._run, problem=problem, prompt="code_contests_prompts_baseline")
        response_baseline, _ = await send_inference(f)
        recent_solution = postprocess_response(response_baseline)
        return recent_solution
    except Exception as e:
        logging.error(f"Error: {e}")
        exit(-1)
153,427 | from alpha_codium.log import get_logger
logger = get_logger(__name__)
def set_configurations(problem, iteration=0):
    """Reduce *problem* to the fields the solver uses and attach bookkeeping.

    Keeps only name/description/public_tests, records the iteration number,
    initialises the passed-tests bookkeeping, and derives a short description
    with the worked input-output examples stripped off.
    """
    trimmed = {field: problem.get(field) for field in ["name", "description", "public_tests"]}
    trimmed['iteration'] = iteration

    # fresh bookkeeping for the tests passed so far
    trimmed['passed_tests'] = {'inputs': [], 'outputs': []}

    # shorter description, without the input-output examples
    description = trimmed['description']
    for marker in ('\nExample\n', '\nExamples\n'):
        if marker in description:
            trimmed['description_short'] = description.split(marker)[0].strip()
            break
    else:
        logger.info(f"could not split description to short description, description: {description}")
        trimmed['description_short'] = description
    return trimmed
153,428 | import functools
import logging
from alpha_codium.llm.ai_invoker import send_inference
from alpha_codium.log import get_logger
from alpha_codium.gen.utils import load_yaml
from alpha_codium.settings.config_loader import get_settings
logger = get_logger(__name__)
def choose_prompt():
    """Pick the best-solution prompt name, honouring the direct-solutions setting."""
    use_direct = get_settings().get("solve.use_direct_solutions", False)
    if use_direct:
        return "code_contests_prompts_choose_best_solution_direct"
    return "code_contests_prompts_choose_best_solution"
async def send_inference(f: Callable):
    """Invoke `f(model)` against each configured (model, deployment) pair in turn.

    Returns the first successful result; a failing pair is logged (with the
    traceback) and the next one is tried.  Only after the last pair fails is
    the exception re-raised to the caller.
    """
    all_models = _get_all_models()
    all_deployments = _get_all_deployments(all_models)
    # try each (model, deployment_id) pair until one is successful, otherwise raise exception
    for i, (model, deployment_id) in enumerate(zip(all_models, all_deployments)):
        try:
            get_settings().set("openai.deployment_id", deployment_id)
            return await f(model)
        except Exception:
            logging.warning(
                f"Failed to generate prediction with {model}"
                f"{(' from deployment ' + deployment_id) if deployment_id else ''}: "
                f"{traceback.format_exc()}"
            )
            if i == len(all_models) - 1:  # If it's the last iteration
                raise  # Re-raise the last exception
def load_yaml(response_text: str, keys_fix_yaml: List[str] = []) -> dict:
    """Parse a (possibly fenced) YAML model response into a dict.

    Strips trailing backticks/whitespace and a leading ```yaml fence, tries
    `yaml.safe_load`, and on parse failure falls back to `try_fix_yaml` with
    the caller-supplied key hints.

    Bug fix: the failure log previously referenced the exception variable `e`
    outside the `except` clause, so a successful parse that returned a falsy
    value (e.g. empty YAML) raised NameError.  The log now lives on the
    fallback path only, where `e` is defined.
    """
    response_text = response_text.rstrip("` \n")
    response_text = response_text.removeprefix('```yaml').rstrip('`')
    try:
        data = yaml.safe_load(response_text)
    except Exception as e:
        data = try_fix_yaml(response_text, keys_fix_yaml=keys_fix_yaml)
        if not data:
            get_logger().info(f"Failed to parse AI YAML prediction: {e}")
    return data
async def run_choose_best_solution(self, problem):
    """Ask the model to pick the best candidate solution for `problem`.

    Stores the model's answer under `s_best_solution` and the non-chosen
    candidates under `s_other_solutions`.  Retried up to 3 times on
    unexpected exceptions, then the last exception is re-raised.
    """
    counter_retry = 0
    while True:
        try:
            logger.info("--choose best solution stage--")
            # get settings
            f = functools.partial(self._run, problem=problem, prompt=choose_prompt())
            # inference
            response_best_solution, _ = await send_inference(f)
            response_best_solution_yaml = load_yaml(response_best_solution,
                                                    keys_fix_yaml=["name:", "content:", "why:", "- "])
            # update best solution
            # NOTE(review): the raw response string (not the parsed YAML) is stored here — confirm intended
            problem['s_best_solution'] = response_best_solution
            if 's_possible_solutions' in problem:
                # keep every candidate that was not chosen as an "other" solution
                problem['s_other_solutions'] = []
                for solution in problem['s_possible_solutions']:
                    if solution['name'] != response_best_solution_yaml['name']:
                        problem['s_other_solutions'].append(solution)
            return problem
        except Exception as e:
            logging.error(f"'run_choose_best_solution' stage, counter_retry {counter_retry}, Error: {e}")
            counter_retry += 1
            if counter_retry > 2:
                raise e
153,429 | import copy
import functools
import logging
import yaml
from alpha_codium.gen.utils import load_yaml
from alpha_codium.settings.config_loader import get_settings
from alpha_codium.llm.ai_invoker import send_inference
from alpha_codium.log import get_logger
logger = get_logger(__name__)
def load_yaml(response_text: str, keys_fix_yaml: List[str] = []) -> dict:
    """Parse a (possibly fenced) YAML model response into a dict.

    Strips trailing backticks/whitespace and a leading ```yaml fence, tries
    `yaml.safe_load`, and on parse failure falls back to `try_fix_yaml` with
    the caller-supplied key hints.

    Bug fix: the failure log previously referenced the exception variable `e`
    outside the `except` clause, so a successful parse that returned a falsy
    value (e.g. empty YAML) raised NameError.  The log now lives on the
    fallback path only, where `e` is defined.
    """
    response_text = response_text.rstrip("` \n")
    response_text = response_text.removeprefix('```yaml').rstrip('`')
    try:
        data = yaml.safe_load(response_text)
    except Exception as e:
        data = try_fix_yaml(response_text, keys_fix_yaml=keys_fix_yaml)
        if not data:
            get_logger().info(f"Failed to parse AI YAML prediction: {e}")
    return data
def get_settings():
    # accessor for the process-wide settings singleton
    return global_settings
async def send_inference(f: Callable):
    """Invoke `f(model)` against each configured (model, deployment) pair in turn.

    Returns the first successful result; a failing pair is logged (with the
    traceback) and the next one is tried.  Only after the last pair fails is
    the exception re-raised to the caller.
    """
    all_models = _get_all_models()
    all_deployments = _get_all_deployments(all_models)
    # try each (model, deployment_id) pair until one is successful, otherwise raise exception
    for i, (model, deployment_id) in enumerate(zip(all_models, all_deployments)):
        try:
            get_settings().set("openai.deployment_id", deployment_id)
            return await f(model)
        except Exception:
            logging.warning(
                f"Failed to generate prediction with {model}"
                f"{(' from deployment ' + deployment_id) if deployment_id else ''}: "
                f"{traceback.format_exc()}"
            )
            if i == len(all_models) - 1:  # If it's the last iteration
                raise  # Re-raise the last exception
async def run_generate_possible_solutions(self, problem):
    """Generate candidate solution sketches for `problem` via the model.

    Skipped entirely when direct solutions are configured.  Populates
    `s_possible_solutions` (parsed list) and `s_possible_solutions_str`
    (raw YAML text after the 'possible_solutions:' key).  Retried up to
    3 times on unexpected exceptions, then the last exception is re-raised.
    """
    counter_retry = 0
    while True:
        try:
            logger.info("--generate possible solutions stage--")
            if get_settings().get("solve.use_direct_solutions", False):
                return problem
            # get settings
            problem['max_num_of_possible_solutions'] = get_settings().get('possible_solutions.max_num_of_possible_solutions')
            problem['use_test_explanations_possible_solutions'] = get_settings().get('possible_solutions.use_test_explanations')
            f = functools.partial(self._run, problem=problem, prompt="code_contests_prompt_generate_possible_solutions")
            # inference
            response_possible_solutions, _ = await send_inference(f)
            response_possible_solutions_yaml = load_yaml(response_possible_solutions)
            if get_settings().get('possible_solutions.remove_bruce_force_solutions'):
                # NOTE(review): the `break` means only the FIRST brute-force candidate
                # is removed; popping while enumerating would skip items otherwise —
                # confirm removing a single candidate is intended
                for i, s in enumerate(response_possible_solutions_yaml['possible_solutions']):
                    if 'brute' in s['name'].lower():
                        response_possible_solutions_yaml['possible_solutions'].pop(i)
                        response_possible_solutions = yaml.dump(response_possible_solutions_yaml, sort_keys=False, line_break="\n")
                        break
            problem['s_possible_solutions'] = response_possible_solutions_yaml['possible_solutions']
            problem['s_possible_solutions_str'] = response_possible_solutions.split('possible_solutions:')[1].strip()
            return problem
        except Exception as e:
            logging.error(f"'possible solutions' stage, counter_retry {counter_retry}, Error: {e}")
            counter_retry += 1
            if counter_retry > 2:
                raise e
import copy
import functools
import logging

import yaml

from alpha_codium.gen.stages.indirect.run_fix_self_reflect import run_validate_self_reflect
from alpha_codium.gen.utils import postprocess_response
from alpha_codium.llm.ai_invoker import send_inference
from alpha_codium.log import get_logger
from alpha_codium.settings.config_loader import get_settings
logger = get_logger(__name__)
async def run_validate_self_reflect(self, problem):
    """Double-check the self-reflection output against the public tests.

    Re-queries the model with a validation prompt, parses the returned YAML,
    verifies the number of fixed test explanations matches the actual number
    of public tests, and re-orders the public tests from easiest to hardest.
    Best-effort: on any failure the problem is returned unchanged.

    Bug fix: the count-mismatch check previously did `raise (f"...")`, which
    raises a plain string and fails with TypeError; it now raises ValueError.
    """
    try:
        logger.info("--validate reflection stage--")
        f = functools.partial(self._run, problem=problem, prompt="code_contests_prompts_validate_reflection")
        # inference
        response_validate_reflect, _ = await send_inference(f)
        response_validate_reflect = response_validate_reflect.rstrip("` \n")
        if response_validate_reflect.startswith("```yaml"):
            # drop the "```yaml" fence plus the newline that follows it
            response_validate_reflect = response_validate_reflect[8:]
        try:
            response_validate_reflect_yaml = yaml.safe_load(response_validate_reflect)
        except yaml.YAMLError:
            response_validate_reflect = postprocess_response(response_validate_reflect)  # try to include only the yaml part
            response_validate_reflect_yaml = yaml.safe_load(response_validate_reflect)
        # check number of tests
        actual_number_of_tests = len(problem['public_tests']['input'])
        calculated_number_of_tests = len(response_validate_reflect_yaml['fixed_tests_explanations'])
        if actual_number_of_tests != calculated_number_of_tests:
            raise ValueError(f"Error: number of tests in validate self-reflection ({calculated_number_of_tests}) "
                             f"does not match the actual number of tests ({actual_number_of_tests})")
        problem['response_validate_self_reflect'] = response_validate_reflect
        problem['tests_explanations'] = response_validate_reflect_yaml['fixed_tests_explanations']
        problem['tests_explanations_str'] = response_validate_reflect.split('tests_explanations:')[1]
        # re-order the public tests from easiest to hardest
        problem['public_tests']['original'] = copy.deepcopy(problem['public_tests'])
        problem['public_tests']['input'] = [t['input'] for t in problem['tests_explanations']]
        problem['public_tests']['output'] = [t['output'] for t in problem['tests_explanations']]
        problem['public_tests']['explanation'] = [t['explanation'] for t in problem['tests_explanations']]
        return problem
    except Exception as e:
        # best-effort stage: keep the original problem on any failure
        logging.error(f"Failed 'run_validate_self_reflect', Error: {e}")
        return problem
def get_settings():
    # accessor for the process-wide settings singleton
    return global_settings
def postprocess_response(response):
    """Normalise a raw model reply.

    Coerces the reply to `str`, drops a trailing "stop" marker, and — when a
    fenced ``` code block is present — returns the body of the first fence
    instead of the full text.
    """
    text = str(response)
    if text.endswith("stop"):
        text = text[:-len("stop")]
    # prefer the contents of the first fenced code block, if any
    fenced = re.findall(r'```\w*\n(.*?)```', text, re.DOTALL)
    return fenced[0] if fenced else text
async def send_inference(f: Callable):
    """Invoke `f(model)` against each configured (model, deployment) pair in turn.

    Returns the first successful result; a failing pair is logged (with the
    traceback) and the next one is tried.  Only after the last pair fails is
    the exception re-raised to the caller.
    """
    all_models = _get_all_models()
    all_deployments = _get_all_deployments(all_models)
    # try each (model, deployment_id) pair until one is successful, otherwise raise exception
    for i, (model, deployment_id) in enumerate(zip(all_models, all_deployments)):
        try:
            get_settings().set("openai.deployment_id", deployment_id)
            return await f(model)
        except Exception:
            logging.warning(
                f"Failed to generate prediction with {model}"
                f"{(' from deployment ' + deployment_id) if deployment_id else ''}: "
                f"{traceback.format_exc()}"
            )
            if i == len(all_models) - 1:  # If it's the last iteration
                raise  # Re-raise the last exception
async def run_self_reflect(self, problem):
    """Self-reflection stage: have the model explain the problem and each public test.

    Populates `response_reflect`, `self_reflection`, `tests_explanations` and
    `tests_explanations_str` on `problem`, optionally double-validating the
    reflection.  Retried up to 3 times on unexpected exceptions, then the
    last exception is re-raised.

    Bug fixes: the count-mismatch check previously did `raise (f"...")`,
    which raises a plain string and fails with TypeError — now ValueError;
    the bare `except:` around the bullet-join was narrowed to `Exception`.
    """
    counter_retry = 0
    while True:
        try:
            logger.info("--reflection stage--")
            # get settings
            validate_self_reflection = get_settings().get('self_reflection.validate_self_reflection', False)
            actual_number_of_tests = len(problem['public_tests']['input'])
            problem['actual_number_of_tests'] = actual_number_of_tests
            f = functools.partial(self._run, problem=problem, prompt="code_contests_prompt_reflect")
            # inference
            response_reflect, _ = await send_inference(f)
            response_reflect = response_reflect.rstrip("` \n")
            if response_reflect.startswith("```yaml"):
                # drop the "```yaml" fence plus the newline that follows it
                response_reflect = response_reflect[8:]
            try:
                response_reflect_yaml = yaml.safe_load(response_reflect)
            except yaml.YAMLError:
                response_reflect = postprocess_response(response_reflect)  # try to include only the yaml part
                response_reflect_yaml = yaml.safe_load(response_reflect)
            # check number of tests
            actual_number_of_tests = len(problem['public_tests']['input'])
            calculated_number_of_tests = len(response_reflect_yaml['tests_explanations'])
            if actual_number_of_tests != calculated_number_of_tests:
                raise ValueError(f"Error: number of tests in self-reflection ({calculated_number_of_tests}) "
                                 f"does not match the actual number of tests ({actual_number_of_tests})")
            problem['response_reflect'] = response_reflect
            try:
                # render the reflection bullets as a single '- '-prefixed string
                problem['self_reflection'] = '- ' + '\n- '.join(response_reflect_yaml['self_reflection'])
                if problem['self_reflection'].startswith('- - '):
                    problem['self_reflection'] = problem['self_reflection'][2:]
            except Exception:
                # reflection was not a list of strings; keep it as returned
                problem['self_reflection'] = response_reflect_yaml['self_reflection']
            problem['tests_explanations'] = response_reflect_yaml['tests_explanations']
            problem['tests_explanations_str'] = response_reflect.split('tests_explanations:')[1]
            # double validation self-reflection
            if validate_self_reflection:
                problem = await run_validate_self_reflect(self, problem)
            # un-escape literal '\n' sequences in the test fields
            for s in problem['tests_explanations']:
                s['input'] = s['input'].replace('\\n', '\n')
                s['output'] = s['output'].replace('\\n', '\n')
                s['explanation'] = s['explanation'].replace('\\n', '\n')
            return problem
        except Exception as e:
            logging.error(f"'run_self_reflect' stage, counter_retry {counter_retry}, Error: {e}")
            counter_retry += 1
            if counter_retry > 2:
                raise e
153,433 | import argparse, os, sys, datetime, glob, importlib, csv
import numpy as np
import time
import torch
import torchvision
import pytorch_lightning as pl
from packaging import version
from omegaconf import OmegaConf
from torch.utils.data import random_split, DataLoader, Dataset, Subset
from functools import partial
from PIL import Image
from pytorch_lightning import seed_everything
from pytorch_lightning.trainer import Trainer
from pytorch_lightning.callbacks import ModelCheckpoint, Callback, LearningRateMonitor
from pytorch_lightning.utilities.distributed import rank_zero_only
from pytorch_lightning.utilities import rank_zero_info
from ldm.data.base import Txt2ImgIterableBaseDataset
from ldm.util import instantiate_from_config
class Txt2ImgIterableBaseDataset(IterableDataset):
def __init__(self, num_records=0, valid_ids=None, size=256):
def __len__(self):
def __iter__(self):
def worker_init_fn(_):
    """DataLoader worker init: shard iterable datasets and de-correlate numpy RNG.

    For a Txt2ImgIterableBaseDataset, each worker gets a contiguous slice of
    the dataset's `valid_ids`; in all cases numpy is re-seeded per worker so
    workers do not produce identical random streams.
    """
    worker_info = torch.utils.data.get_worker_info()
    dataset = worker_info.dataset
    worker_id = worker_info.id
    if isinstance(dataset, Txt2ImgIterableBaseDataset):
        # assign this worker its contiguous shard of the valid ids
        split_size = dataset.num_records // worker_info.num_workers
        dataset.sample_ids = dataset.valid_ids[worker_id * split_size:(worker_id + 1) * split_size]
        # derive a per-worker seed from the current numpy RNG state
        current_id = np.random.choice(len(np.random.get_state()[1]), 1)
        return np.random.seed(np.random.get_state()[1][current_id] + worker_id)
    else:
        return np.random.seed(np.random.get_state()[1][0] + worker_id)
153,444 | import argparse, os, sys, glob, datetime, yaml
import torch
import time
import numpy as np
from tqdm import trange
from omegaconf import OmegaConf
from PIL import Image
from ldm.models.diffusion.ddim import DDIMSampler
from ldm.util import instantiate_from_config
def custom_to_np(x):
    """Convert a batch of images from [-1, 1] float NCHW to uint8 NHWC.

    Matches the ADM layout used in
    https://github.com/openai/guided-diffusion/blob/main/scripts/image_sample.py
    """
    batch = x.detach().cpu()
    batch = ((batch + 1) * 127.5).clamp(0, 255).to(torch.uint8)
    return batch.permute(0, 2, 3, 1).contiguous()
def make_convolutional_sample(model, batch_size, vanilla=False, custom_steps=None, eta=1.0,):
    """Draw one batch of unconditional samples and decode it to image space.

    Uses vanilla DDPM sampling when `vanilla` is set, otherwise DDIM with
    `custom_steps` steps and the given `eta`.  Returns a dict with the decoded
    batch under "sample" plus wall-clock "time" and "throughput".
    """
    log = dict()
    # latent shape: [B, C, H, W] taken from the diffusion model config
    shape = [batch_size,
             model.model.diffusion_model.in_channels,
             model.model.diffusion_model.image_size,
             model.model.diffusion_model.image_size]
    with model.ema_scope("Plotting"):
        t0 = time.time()
        if vanilla:
            sample, progrow = convsample(model, shape,
                                         make_prog_row=True)
        else:
            sample, intermediates = convsample_ddim(model, steps=custom_steps, shape=shape,
                                                    eta=eta)
        t1 = time.time()
    # decode latents to image space
    x_sample = model.decode_first_stage(sample)
    log["sample"] = x_sample
    log["time"] = t1 - t0
    log['throughput'] = sample.shape[0] / (t1 - t0)
    print(f'Throughput for this batch: {log["throughput"]}')
    return log
def save_logs(logs, path, n_saved=0, key="sample", np_path=None):
    """Persist the batch stored under `key` in `logs`.

    When `np_path` is None each image is written as an individual PNG under
    `path`; otherwise the whole batch is stored as one .npz archive under
    `np_path`.  Returns the updated running count of saved samples.
    """
    if key in logs:
        batch = logs[key]
        if np_path is None:
            # one PNG per image, numbered by the running counter
            for sample in batch:
                pil_image = custom_to_pil(sample)
                out_file = os.path.join(path, f"{key}_{n_saved:06}.png")
                pil_image.save(out_file)
                n_saved += 1
        else:
            # a single numpy archive for the whole batch
            as_np = custom_to_np(batch)
            shape_str = "x".join([str(x) for x in as_np.shape])
            np.savez(os.path.join(np_path, f"{n_saved}-{shape_str}-samples.npz"), as_np)
            n_saved += as_np.shape[0]
    return n_saved
def run(model, logdir, batch_size=50, vanilla=False, custom_steps=None, eta=None, n_samples=50000, nplog=None):
    """Sample `n_samples` unconditional images in batches, saving PNGs and an npz.

    PNGs go to `logdir` (numbering resumes from files already present) and the
    accumulated batch goes to one .npz under `nplog`.  Only unconditional
    models (no cond stage) are supported.
    """
    if vanilla:
        print(f'Using Vanilla DDPM sampling with {model.num_timesteps} sampling steps.')
    else:
        print(f'Using DDIM sampling with {custom_steps} sampling steps and eta={eta}')
    tstart = time.time()
    # resume numbering from PNGs already present in logdir
    n_saved = len(glob.glob(os.path.join(logdir,'*.png')))-1
    # path = logdir
    if model.cond_stage_model is None:
        all_images = []
        print(f"Running unconditional sampling for {n_samples} samples")
        for _ in trange(n_samples // batch_size, desc="Sampling Batches (unconditional)"):
            logs = make_convolutional_sample(model, batch_size=batch_size,
                                             vanilla=vanilla, custom_steps=custom_steps,
                                             eta=eta)
            n_saved = save_logs(logs, logdir, n_saved=n_saved, key="sample")
            all_images.extend([custom_to_np(logs["sample"])])
            if n_saved >= n_samples:
                print(f'Finish after generating {n_saved} samples')
                break
        # concatenate all batches and truncate to the requested sample count
        all_img = np.concatenate(all_images, axis=0)
        all_img = all_img[:n_samples]
        shape_str = "x".join([str(x) for x in all_img.shape])
        nppath = os.path.join(nplog, f"{shape_str}-samples.npz")
        np.savez(nppath, all_img)
    else:
        raise NotImplementedError('Currently only sampling for unconditional models supported.')
    print(f"sampling of {n_saved} images finished in {(time.time() - tstart) / 60.:.2f} minutes.")
153,446 | import argparse, os, sys, glob, datetime, yaml
import torch
import time
import numpy as np
from tqdm import trange
from omegaconf import OmegaConf
from PIL import Image
from ldm.models.diffusion.ddim import DDIMSampler
from ldm.util import instantiate_from_config
def load_model_from_config(config, sd):
def load_model(config, ckpt, gpu, eval_mode):
    """Build the model described by `config`, optionally restoring weights from `ckpt`.

    Returns (model, global_step); `global_step` is None when no checkpoint is
    given.  NOTE(review): `gpu` and `eval_mode` are accepted but unused here —
    presumably consumed by `load_model_from_config` or the caller; confirm.
    """
    if ckpt:
        print(f"Loading model from {ckpt}")
        pl_sd = torch.load(ckpt, map_location="cpu")
        global_step = pl_sd["global_step"]
    else:
        # no checkpoint: build the model with fresh weights
        pl_sd = {"state_dict": None}
        global_step = None
    model = load_model_from_config(config.model,
                                   pl_sd["state_dict"])
    return model, global_step
153,451 | from torchvision.datasets.utils import download_url
from ldm.util import instantiate_from_config
import torch
import os
from google.colab import files
from IPython.display import Image as ipyimg
import ipywidgets as widgets
from PIL import Image
from numpy import asarray
from einops import rearrange, repeat
import torch, torchvision
from ldm.models.diffusion.ddim import DDIMSampler
from ldm.util import ismap
import time
from omegaconf import OmegaConf
def download_models(mode):
    """Download config + checkpoint for `mode` and return their local paths.

    Only "superresolution" (the small BSR light model) is supported; the
    returned paths carry the '/?dl=1' suffix under which the files are
    actually stored on disk.
    """
    if mode != "superresolution":
        raise NotImplementedError
    # this is the small bsr light model
    url_conf = 'https://heibox.uni-heidelberg.de/f/31a76b13ea27482981b4/?dl=1'
    url_ckpt = 'https://heibox.uni-heidelberg.de/f/578df07c8fc04ffbadf3/?dl=1'
    path_conf = 'logs/diffusion/superresolution_bsr/configs/project.yaml'
    path_ckpt = 'logs/diffusion/superresolution_bsr/checkpoints/last.ckpt'
    download_url(url_conf, path_conf)
    download_url(url_ckpt, path_ckpt)
    return path_conf + '/?dl=1', path_ckpt + '/?dl=1'
def load_model_from_config(config, ckpt):
    """Instantiate the model from `config` and load weights from checkpoint `ckpt`.

    Loads the state dict non-strictly (missing/unexpected keys tolerated),
    moves the model to CUDA and switches it to eval mode.
    Returns ({"model": model}, global_step).
    """
    print(f"Loading model from {ckpt}")
    pl_sd = torch.load(ckpt, map_location="cpu")
    global_step = pl_sd["global_step"]
    sd = pl_sd["state_dict"]
    model = instantiate_from_config(config.model)
    # non-strict load: m/u hold missing and unexpected keys (ignored here)
    m, u = model.load_state_dict(sd, strict=False)
    model.cuda()
    model.eval()
    return {"model": model}, global_step
def get_model(mode):
    """Download and build the diffusion model for `mode`.

    Returns the {"model": ...} dict from `load_model_from_config`; the
    checkpoint's global step is discarded.
    """
    conf_path, ckpt_path = download_models(mode)
    cfg = OmegaConf.load(conf_path)
    model_dict, _step = load_model_from_config(cfg, ckpt_path)
    return model_dict
153,479 | import math
import torch
import torch.nn as nn
import numpy as np
from einops import rearrange
from ldm.util import instantiate_from_config
from ldm.modules.attention import LinearAttention
class LinAttnBlock(LinearAttention):
def __init__(self, in_channels):
class AttnBlock(nn.Module):
def __init__(self, in_channels):
def forward(self, x):
def make_attn(in_channels, attn_type="vanilla"):
    """Factory for attention layers.

    "vanilla" -> AttnBlock, "linear" -> LinAttnBlock, "none" -> nn.Identity.
    """
    assert attn_type in ["vanilla", "linear", "none"], f'attn_type {attn_type} unknown'
    print(f"making attention of type '{attn_type}' with {in_channels} in_channels")
    if attn_type == "none":
        return nn.Identity(in_channels)
    if attn_type == "vanilla":
        return AttnBlock(in_channels)
    return LinAttnBlock(in_channels)
153,556 | import torch
import torch.nn.functional as F
import math
def expand_dims(v, dims):
    """Append trailing singleton axes to `v` until it has `dims` dimensions.

    Args:
        `v`: a PyTorch tensor with shape [N].
        `dims`: an `int`, the target total number of dimensions.
    Returns:
        a PyTorch tensor with shape [N, 1, 1, ..., 1] and `dims` dimensions in total.
    """
    return v.reshape(v.shape + (1,) * (dims - 1))
The provided code snippet includes necessary dependencies for implementing the `model_wrapper` function. Write a Python function `def model_wrapper( model, noise_schedule, model_type="noise", model_kwargs={}, guidance_type="uncond", condition=None, unconditional_condition=None, guidance_scale=1., classifier_fn=None, classifier_kwargs={}, )` to solve the following problem:
Create a wrapper function for the noise prediction model. DPM-Solver needs to solve the continuous-time diffusion ODEs. For DPMs trained on discrete-time labels, we need to firstly wrap the model function to a noise prediction model that accepts the continuous time as the input. We support four types of the diffusion model by setting `model_type`: 1. "noise": noise prediction model. (Trained by predicting noise). 2. "x_start": data prediction model. (Trained by predicting the data x_0 at time 0). 3. "v": velocity prediction model. (Trained by predicting the velocity). The "v" prediction is derivation detailed in Appendix D of [1], and is used in Imagen-Video [2]. [1] Salimans, Tim, and Jonathan Ho. "Progressive distillation for fast sampling of diffusion models." arXiv preprint arXiv:2202.00512 (2022). [2] Ho, Jonathan, et al. "Imagen Video: High Definition Video Generation with Diffusion Models." arXiv preprint arXiv:2210.02303 (2022). 4. "score": marginal score function. (Trained by denoising score matching). Note that the score function and the noise prediction model follows a simple relationship: ``` noise(x_t, t) = -sigma_t * score(x_t, t) ``` We support three types of guided sampling by DPMs by setting `guidance_type`: 1. "uncond": unconditional sampling by DPMs. The input `model` has the following format: `` model(x, t_input, **model_kwargs) -> noise | x_start | v | score `` 2. "classifier": classifier guidance sampling [3] by DPMs and another classifier. The input `model` has the following format: `` model(x, t_input, **model_kwargs) -> noise | x_start | v | score `` The input `classifier_fn` has the following format: `` classifier_fn(x, t_input, cond, **classifier_kwargs) -> logits(x, t_input, cond) `` [3] P. Dhariwal and A. Q. Nichol, "Diffusion models beat GANs on image synthesis," in Advances in Neural Information Processing Systems, vol. 34, 2021, pp. 8780-8794. 3. "classifier-free": classifier-free guidance sampling by conditional DPMs. 
The input `model` has the following format: `` model(x, t_input, cond, **model_kwargs) -> noise | x_start | v | score `` And if cond == `unconditional_condition`, the model output is the unconditional DPM output. [4] Ho, Jonathan, and Tim Salimans. "Classifier-free diffusion guidance." arXiv preprint arXiv:2207.12598 (2022). The `t_input` is the time label of the model, which may be discrete-time labels (i.e. 0 to 999) or continuous-time labels (i.e. epsilon to T). We wrap the model function to accept only `x` and `t_continuous` as inputs, and outputs the predicted noise: `` def model_fn(x, t_continuous) -> noise: t_input = get_model_input_time(t_continuous) return noise_pred(model, x, t_input, **model_kwargs) `` where `t_continuous` is the continuous time labels (i.e. epsilon to T). And we use `model_fn` for DPM-Solver. =============================================================== Args: model: A diffusion model with the corresponding format described above. noise_schedule: A noise schedule object, such as NoiseScheduleVP. model_type: A `str`. The parameterization type of the diffusion model. "noise" or "x_start" or "v" or "score". model_kwargs: A `dict`. A dict for the other inputs of the model function. guidance_type: A `str`. The type of the guidance for sampling. "uncond" or "classifier" or "classifier-free". condition: A pytorch tensor. The condition for the guided sampling. Only used for "classifier" or "classifier-free" guidance type. unconditional_condition: A pytorch tensor. The condition for the unconditional sampling. Only used for "classifier-free" guidance type. guidance_scale: A `float`. The scale for the guided sampling. classifier_fn: A classifier function. Only used for the classifier guidance. classifier_kwargs: A `dict`. A dict for the other inputs of the classifier function. Returns: A noise prediction model that accepts the noised data and the continuous time as the inputs.
Here is the function:
def model_wrapper(
model,
noise_schedule,
model_type="noise",
model_kwargs={},
guidance_type="uncond",
condition=None,
unconditional_condition=None,
guidance_scale=1.,
classifier_fn=None,
classifier_kwargs={},
):
"""Create a wrapper function for the noise prediction model.
DPM-Solver needs to solve the continuous-time diffusion ODEs. For DPMs trained on discrete-time labels, we need to
firstly wrap the model function to a noise prediction model that accepts the continuous time as the input.
We support four types of the diffusion model by setting `model_type`:
1. "noise": noise prediction model. (Trained by predicting noise).
2. "x_start": data prediction model. (Trained by predicting the data x_0 at time 0).
3. "v": velocity prediction model. (Trained by predicting the velocity).
The "v" prediction is derivation detailed in Appendix D of [1], and is used in Imagen-Video [2].
[1] Salimans, Tim, and Jonathan Ho. "Progressive distillation for fast sampling of diffusion models."
arXiv preprint arXiv:2202.00512 (2022).
[2] Ho, Jonathan, et al. "Imagen Video: High Definition Video Generation with Diffusion Models."
arXiv preprint arXiv:2210.02303 (2022).
4. "score": marginal score function. (Trained by denoising score matching).
Note that the score function and the noise prediction model follows a simple relationship:
```
noise(x_t, t) = -sigma_t * score(x_t, t)
```
We support three types of guided sampling by DPMs by setting `guidance_type`:
1. "uncond": unconditional sampling by DPMs.
The input `model` has the following format:
``
model(x, t_input, **model_kwargs) -> noise | x_start | v | score
``
2. "classifier": classifier guidance sampling [3] by DPMs and another classifier.
The input `model` has the following format:
``
model(x, t_input, **model_kwargs) -> noise | x_start | v | score
``
The input `classifier_fn` has the following format:
``
classifier_fn(x, t_input, cond, **classifier_kwargs) -> logits(x, t_input, cond)
``
[3] P. Dhariwal and A. Q. Nichol, "Diffusion models beat GANs on image synthesis,"
in Advances in Neural Information Processing Systems, vol. 34, 2021, pp. 8780-8794.
3. "classifier-free": classifier-free guidance sampling by conditional DPMs.
The input `model` has the following format:
``
model(x, t_input, cond, **model_kwargs) -> noise | x_start | v | score
``
And if cond == `unconditional_condition`, the model output is the unconditional DPM output.
[4] Ho, Jonathan, and Tim Salimans. "Classifier-free diffusion guidance."
arXiv preprint arXiv:2207.12598 (2022).
The `t_input` is the time label of the model, which may be discrete-time labels (i.e. 0 to 999)
or continuous-time labels (i.e. epsilon to T).
We wrap the model function to accept only `x` and `t_continuous` as inputs, and outputs the predicted noise:
``
def model_fn(x, t_continuous) -> noise:
t_input = get_model_input_time(t_continuous)
return noise_pred(model, x, t_input, **model_kwargs)
``
where `t_continuous` is the continuous time labels (i.e. epsilon to T). And we use `model_fn` for DPM-Solver.
===============================================================
Args:
model: A diffusion model with the corresponding format described above.
noise_schedule: A noise schedule object, such as NoiseScheduleVP.
model_type: A `str`. The parameterization type of the diffusion model.
"noise" or "x_start" or "v" or "score".
model_kwargs: A `dict`. A dict for the other inputs of the model function.
guidance_type: A `str`. The type of the guidance for sampling.
"uncond" or "classifier" or "classifier-free".
condition: A pytorch tensor. The condition for the guided sampling.
Only used for "classifier" or "classifier-free" guidance type.
unconditional_condition: A pytorch tensor. The condition for the unconditional sampling.
Only used for "classifier-free" guidance type.
guidance_scale: A `float`. The scale for the guided sampling.
classifier_fn: A classifier function. Only used for the classifier guidance.
classifier_kwargs: A `dict`. A dict for the other inputs of the classifier function.
Returns:
A noise prediction model that accepts the noised data and the continuous time as the inputs.
"""
def get_model_input_time(t_continuous):
    """
    Map continuous time in [epsilon, T] to the time label the model expects.

    Discrete-time DPMs take labels in [0, 1000 * (N - 1) / N], so continuous
    times in [1 / N, 1] are shifted and rescaled; continuous-time DPMs use the
    continuous time unchanged.
    """
    if noise_schedule.schedule != 'discrete':
        return t_continuous
    return (t_continuous - 1. / noise_schedule.total_N) * 1000.
def noise_pred_fn(x, t_continuous, cond=None):
    """Predict noise at continuous time `t_continuous`, converting the model's
    native output ("noise" | "x_start" | "v" | "score") to a noise prediction."""
    # Broadcast a scalar time to the whole batch.
    if t_continuous.reshape((-1,)).shape[0] == 1:
        t_continuous = t_continuous.expand((x.shape[0]))
    t_input = get_model_input_time(t_continuous)
    if cond is None:
        output = model(x, t_input, **model_kwargs)
    else:
        output = model(x, t_input, cond, **model_kwargs)
    if model_type == "noise":
        return output
    elif model_type == "x_start":
        # x_t = alpha_t * x_0 + sigma_t * noise  =>  noise = (x_t - alpha_t * x_0) / sigma_t
        alpha_t, sigma_t = noise_schedule.marginal_alpha(t_continuous), noise_schedule.marginal_std(t_continuous)
        dims = x.dim()
        return (x - expand_dims(alpha_t, dims) * output) / expand_dims(sigma_t, dims)
    elif model_type == "v":
        # v-parameterization: noise = alpha_t * v + sigma_t * x_t
        alpha_t, sigma_t = noise_schedule.marginal_alpha(t_continuous), noise_schedule.marginal_std(t_continuous)
        dims = x.dim()
        return expand_dims(alpha_t, dims) * output + expand_dims(sigma_t, dims) * x
    elif model_type == "score":
        # noise(x_t, t) = -sigma_t * score(x_t, t)
        sigma_t = noise_schedule.marginal_std(t_continuous)
        dims = x.dim()
        return -expand_dims(sigma_t, dims) * output
def cond_grad_fn(x, t_input):
    """
    Compute the gradient of the classifier, i.e. nabla_{x} log p_t(cond | x_t).
    """
    with torch.enable_grad():
        # Detach so the gradient is taken w.r.t. x only, not any upstream graph.
        x_in = x.detach().requires_grad_(True)
        log_prob = classifier_fn(x_in, t_input, condition, **classifier_kwargs)
        return torch.autograd.grad(log_prob.sum(), x_in)[0]
def model_fn(x, t_continuous):
    """
    The noise prediction model function that is used for DPM-Solver.
    """
    # Broadcast a scalar time to the whole batch.
    if t_continuous.reshape((-1,)).shape[0] == 1:
        t_continuous = t_continuous.expand((x.shape[0]))
    if guidance_type == "uncond":
        return noise_pred_fn(x, t_continuous)
    elif guidance_type == "classifier":
        # Classifier guidance: noise_guided = noise - s * sigma_t * grad_x log p(cond | x_t)
        assert classifier_fn is not None
        t_input = get_model_input_time(t_continuous)
        cond_grad = cond_grad_fn(x, t_input)
        sigma_t = noise_schedule.marginal_std(t_continuous)
        noise = noise_pred_fn(x, t_continuous)
        return noise - guidance_scale * expand_dims(sigma_t, dims=cond_grad.dim()) * cond_grad
    elif guidance_type == "classifier-free":
        if guidance_scale == 1. or unconditional_condition is None:
            # Guidance is a no-op here: one conditional forward pass suffices.
            return noise_pred_fn(x, t_continuous, cond=condition)
        else:
            # Classifier-free guidance: run the unconditional and conditional
            # branches in a single batched call, then extrapolate.
            x_in = torch.cat([x] * 2)
            t_in = torch.cat([t_continuous] * 2)
            c_in = torch.cat([unconditional_condition, condition])
            noise_uncond, noise = noise_pred_fn(x_in, t_in, cond=c_in).chunk(2)
            return noise_uncond + guidance_scale * (noise - noise_uncond)
assert model_type in ["noise", "x_start", "v"]
assert guidance_type in ["uncond", "classifier", "classifier-free"]
return model_fn | Create a wrapper function for the noise prediction model. DPM-Solver needs to solve the continuous-time diffusion ODEs. For DPMs trained on discrete-time labels, we need to firstly wrap the model function to a noise prediction model that accepts the continuous time as the input. We support four types of the diffusion model by setting `model_type`: 1. "noise": noise prediction model. (Trained by predicting noise). 2. "x_start": data prediction model. (Trained by predicting the data x_0 at time 0). 3. "v": velocity prediction model. (Trained by predicting the velocity). The "v" prediction is derivation detailed in Appendix D of [1], and is used in Imagen-Video [2]. [1] Salimans, Tim, and Jonathan Ho. "Progressive distillation for fast sampling of diffusion models." arXiv preprint arXiv:2202.00512 (2022). [2] Ho, Jonathan, et al. "Imagen Video: High Definition Video Generation with Diffusion Models." arXiv preprint arXiv:2210.02303 (2022). 4. "score": marginal score function. (Trained by denoising score matching). Note that the score function and the noise prediction model follows a simple relationship: ``` noise(x_t, t) = -sigma_t * score(x_t, t) ``` We support three types of guided sampling by DPMs by setting `guidance_type`: 1. "uncond": unconditional sampling by DPMs. The input `model` has the following format: `` model(x, t_input, **model_kwargs) -> noise | x_start | v | score `` 2. "classifier": classifier guidance sampling [3] by DPMs and another classifier. The input `model` has the following format: `` model(x, t_input, **model_kwargs) -> noise | x_start | v | score `` The input `classifier_fn` has the following format: `` classifier_fn(x, t_input, cond, **classifier_kwargs) -> logits(x, t_input, cond) `` [3] P. Dhariwal and A. Q. Nichol, "Diffusion models beat GANs on image synthesis," in Advances in Neural Information Processing Systems, vol. 34, 2021, pp. 8780-8794. 3. 
"classifier-free": classifier-free guidance sampling by conditional DPMs. The input `model` has the following format: `` model(x, t_input, cond, **model_kwargs) -> noise | x_start | v | score `` And if cond == `unconditional_condition`, the model output is the unconditional DPM output. [4] Ho, Jonathan, and Tim Salimans. "Classifier-free diffusion guidance." arXiv preprint arXiv:2207.12598 (2022). The `t_input` is the time label of the model, which may be discrete-time labels (i.e. 0 to 999) or continuous-time labels (i.e. epsilon to T). We wrap the model function to accept only `x` and `t_continuous` as inputs, and outputs the predicted noise: `` def model_fn(x, t_continuous) -> noise: t_input = get_model_input_time(t_continuous) return noise_pred(model, x, t_input, **model_kwargs) `` where `t_continuous` is the continuous time labels (i.e. epsilon to T). And we use `model_fn` for DPM-Solver. =============================================================== Args: model: A diffusion model with the corresponding format described above. noise_schedule: A noise schedule object, such as NoiseScheduleVP. model_type: A `str`. The parameterization type of the diffusion model. "noise" or "x_start" or "v" or "score". model_kwargs: A `dict`. A dict for the other inputs of the model function. guidance_type: A `str`. The type of the guidance for sampling. "uncond" or "classifier" or "classifier-free". condition: A pytorch tensor. The condition for the guided sampling. Only used for "classifier" or "classifier-free" guidance type. unconditional_condition: A pytorch tensor. The condition for the unconditional sampling. Only used for "classifier-free" guidance type. guidance_scale: A `float`. The scale for the guided sampling. classifier_fn: A classifier function. Only used for the classifier guidance. classifier_kwargs: A `dict`. A dict for the other inputs of the classifier function. 
Returns: A noise prediction model that accepts the noised data and the continuous time as the inputs. |
153,563 | import argparse
import json
import sys
from pathlib import Path
import k_diffusion
import numpy as np
import torch
import torch.nn as nn
from einops import rearrange, repeat
from omegaconf import OmegaConf
from PIL import Image
from pytorch_lightning import seed_everything
from tqdm import tqdm
from ldm.modules.attention import CrossAttention
from ldm.util import instantiate_from_config
from metrics.clip_similarity import ClipSimilarity
def to_d(x, sigma, denoised):
    """Converts a denoiser output to a Karras ODE derivative."""
    residual = x - denoised
    return residual / append_dims(sigma, x.ndim)
def get_ancestral_step(sigma_from, sigma_to):
    """Calculates the noise level (sigma_down) to step down to and the amount
    of noise to add (sigma_up) when doing an ancestral sampling step."""
    # Fraction of the variance removed by stepping sigma_from -> sigma_to.
    variance_ratio = (sigma_from**2 - sigma_to**2) / sigma_from**2
    sigma_up = min(sigma_to, (sigma_to**2 * variance_ratio) ** 0.5)
    # The deterministic part of the step keeps total variance at sigma_to**2.
    sigma_down = (sigma_to**2 - sigma_up**2) ** 0.5
    return sigma_down, sigma_up
class CrossAttention(nn.Module):
    """Multi-head (cross-)attention with an optional Prompt-to-Prompt mode that
    shares self-attention maps across the prompt pairs of a 4-element batch."""

    def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0.):
        super().__init__()
        inner_dim = dim_head * heads
        # `default` falls back to query_dim when no context dim is given
        # (i.e. the self-attention case).
        context_dim = default(context_dim, query_dim)

        self.scale = dim_head ** -0.5  # 1/sqrt(d_k) attention scaling
        self.heads = heads

        self.to_q = nn.Linear(query_dim, inner_dim, bias=False)
        self.to_k = nn.Linear(context_dim, inner_dim, bias=False)
        self.to_v = nn.Linear(context_dim, inner_dim, bias=False)

        self.to_out = nn.Sequential(
            nn.Linear(inner_dim, query_dim),
            nn.Dropout(dropout)
        )

        # When True, self-attention maps are copied between prompts (see forward).
        self.prompt_to_prompt = False

    def forward(self, x, context=None, mask=None):
        # Self-attention when no external context is provided.
        is_self_attn = context is None

        h = self.heads
        q = self.to_q(x)
        context = default(context, x)
        k = self.to_k(context)
        v = self.to_v(context)
        # Fold heads into the batch dimension: (b, n, h*d) -> (b*h, n, d).
        q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q, k, v))

        sim = einsum('b i d, b j d -> b i j', q, k) * self.scale

        if self.prompt_to_prompt and is_self_attn:
            # Unlike the original Prompt-to-Prompt which uses cross-attention layers, we copy attention maps for self-attention layers.
            # There must be 4 elements in the batch: {conditional, unconditional} x {prompt 1, prompt 2}
            assert x.size(0) == 4
            sims = sim.chunk(4)
            # Reuse prompt-1 attention maps for prompt 2 in both halves.
            sim = torch.cat((sims[0], sims[0], sims[2], sims[2]))

        if exists(mask):
            # Mask out disallowed keys with the most negative finite value
            # so they vanish after the softmax.
            mask = rearrange(mask, 'b ... -> b (...)')
            max_neg_value = -torch.finfo(sim.dtype).max
            mask = repeat(mask, 'b j -> (b h) () j', h=h)
            sim.masked_fill_(~mask, max_neg_value)

        # attention, what we cannot get enough of
        attn = sim.softmax(dim=-1)

        out = einsum('b i j, b j d -> b i d', attn, v)
        # Unfold heads back out of the batch dimension: (b*h, n, d) -> (b, n, h*d).
        out = rearrange(out, '(b h) n d -> b n (h d)', h=h)
        return self.to_out(out)
The provided code snippet includes necessary dependencies for implementing the `sample_euler_ancestral` function. Write a Python function `def sample_euler_ancestral(model, x, sigmas, prompt2prompt_threshold=0.0, **extra_args)` to solve the following problem:
Ancestral sampling with Euler method steps.
Here is the function:
def sample_euler_ancestral(model, x, sigmas, prompt2prompt_threshold=0.0, **extra_args):
    """Ancestral sampling with Euler method steps.

    For the first `prompt2prompt_threshold` fraction of the sigma schedule,
    every CrossAttention module inside `model` is switched into
    Prompt-to-Prompt mode so both prompts share self-attention maps.
    """
    s_in = x.new_ones([x.shape[0]])
    for i in range(len(sigmas) - 1):
        # Enable attention-map sharing only early in the schedule.
        prompt_to_prompt = prompt2prompt_threshold > i / (len(sigmas) - 2)
        for m in model.modules():
            if isinstance(m, CrossAttention):
                m.prompt_to_prompt = prompt_to_prompt
        denoised = model(x, sigmas[i] * s_in, **extra_args)
        sigma_down, sigma_up = get_ancestral_step(sigmas[i], sigmas[i + 1])
        d = to_d(x, sigmas[i], denoised)
        # Euler method
        dt = sigma_down - sigmas[i]
        x = x + d * dt
        if sigmas[i + 1] > 0:
            # Make noise the same across all samples in batch.
            x = x + torch.randn_like(x[:1]) * sigma_up
    return x
153,564 | import argparse
import json
import sys
from pathlib import Path
import k_diffusion
import numpy as np
import torch
import torch.nn as nn
from einops import rearrange, repeat
from omegaconf import OmegaConf
from PIL import Image
from pytorch_lightning import seed_everything
from tqdm import tqdm
from ldm.modules.attention import CrossAttention
from ldm.util import instantiate_from_config
from metrics.clip_similarity import ClipSimilarity
def instantiate_from_config(config):
    """Build the object named by `config["target"]`, passing `config["params"]`
    as keyword arguments; the two sentinel strings map to None for stages that
    need no module."""
    if "target" not in config:
        if config in ('__is_first_stage__', '__is_unconditional__'):
            return None
        raise KeyError("Expected key `target` to instantiate.")
    params = config.get("params", dict())
    return get_obj_from_str(config["target"])(**params)
def load_model_from_config(config, ckpt, vae_ckpt=None, verbose=False):
    """Instantiate the model described by `config.model` and load weights from
    the checkpoint at `ckpt`; optionally overwrite first-stage (VAE) weights
    from `vae_ckpt`. Returns the model; the state dict is loaded non-strictly."""
    print(f"Loading model from {ckpt}")
    pl_sd = torch.load(ckpt, map_location="cpu")
    if "global_step" in pl_sd:
        print(f"Global Step: {pl_sd['global_step']}")
    sd = pl_sd["state_dict"]
    if vae_ckpt is not None:
        print(f"Loading VAE from {vae_ckpt}")
        vae_sd = torch.load(vae_ckpt, map_location="cpu")["state_dict"]
        # Swap in the VAE weights for every `first_stage_model.*` key.
        sd = {
            k: vae_sd[k[len("first_stage_model.") :]] if k.startswith("first_stage_model.") else v
            for k, v in sd.items()
        }
    model = instantiate_from_config(config.model)
    # strict=False tolerates missing/unexpected keys; they are reported below.
    m, u = model.load_state_dict(sd, strict=False)
    if len(m) > 0 and verbose:
        print("missing keys:")
        print(m)
    if len(u) > 0 and verbose:
        print("unexpected keys:")
        print(u)
    return model
153,565 | import argparse
import json
import sys
from pathlib import Path
import k_diffusion
import numpy as np
import torch
import torch.nn as nn
from einops import rearrange, repeat
from omegaconf import OmegaConf
from PIL import Image
from pytorch_lightning import seed_everything
from tqdm import tqdm
from ldm.modules.attention import CrossAttention
from ldm.util import instantiate_from_config
from metrics.clip_similarity import ClipSimilarity
def to_pil(image: torch.Tensor) -> Image.Image:
    """Convert a CHW float tensor with values in [0, 1] to a PIL image."""
    array = rearrange(image.cpu().numpy(), "c h w -> h w c") * 255.0
    return Image.fromarray(array.astype(np.uint8))
153,566 | from __future__ import annotations
import json
import time
from argparse import ArgumentParser
from pathlib import Path
from typing import Optional
import datasets
import numpy as np
import openai
from tqdm.auto import tqdm
DELIMITER_0 = "\n##\n"
DELIMITER_1 = "\n%%\n"
STOP = "\nEND"
def generate(
    openai_model: str,
    caption: str,
    num_retries: int = 3,
    max_tokens: int = 256,
    temperature: float = 0.7,
    top_p: float = 1.0,
    frequency_penalty: float = 0.1,
    presence_penalty: float = 0.0,
    sleep_on_error: float = 1.0,
) -> Optional[tuple[str, str]]:
    """Ask the OpenAI completion model for an (instruction, edited_caption)
    pair derived from `caption`; returns None if every attempt fails.

    An attempt is discarded (and retried up to `num_retries` times) when the
    API call errors, the completion does not split into exactly two parts on
    DELIMITER_1, the moderation endpoint flags either part, or the edited
    caption is trivially identical to the input caption.
    """
    for _ in range(1 + num_retries):
        try:
            response = openai.Completion.create(
                model=openai_model,
                prompt=caption + DELIMITER_0,
                temperature=temperature,
                max_tokens=max_tokens,
                top_p=top_p,
                frequency_penalty=frequency_penalty,
                presence_penalty=presence_penalty,
                stop=[STOP],
            )
        except Exception as e:
            # Transient API failure: back off briefly and retry.
            print(e)
            time.sleep(sleep_on_error)
            continue
        output = response["choices"][0]["text"].split(DELIMITER_1)
        if len(output) == 2:
            instruction, edited_caption = output
            results = openai.Moderation.create([instruction, edited_caption])["results"]
            if results[0]["flagged"] or results[1]["flagged"]:
                continue
            # Reject no-op edits (ignoring case and trailing punctuation).
            if caption.strip().strip(".!?").lower() != edited_caption.strip().strip(".!?").lower():
                return instruction, edited_caption
153,567 | import argparse, os, sys, datetime, glob
import numpy as np
import time
import torch
import torchvision
import pytorch_lightning as pl
import json
import pickle
from packaging import version
from omegaconf import OmegaConf
from torch.utils.data import DataLoader, Dataset
from functools import partial
from PIL import Image
import torch.distributed as dist
from pytorch_lightning import seed_everything
from pytorch_lightning.trainer import Trainer
from pytorch_lightning.callbacks import ModelCheckpoint, Callback, LearningRateMonitor
from pytorch_lightning.utilities.distributed import rank_zero_only
from pytorch_lightning.utilities import rank_zero_info
from pytorch_lightning.plugins import DDPPlugin
from ldm.data.base import Txt2ImgIterableBaseDataset
from ldm.util import instantiate_from_config
def get_parser(**parser_kwargs):
    """Build the command-line argument parser for a training/eval run.

    Any `parser_kwargs` are forwarded to `argparse.ArgumentParser`.
    """
    def str2bool(v):
        # Accept a real bool or the common truthy/falsy string spellings.
        if isinstance(v, bool):
            return v
        if v.lower() in ("yes", "true", "t", "y", "1"):
            return True
        elif v.lower() in ("no", "false", "f", "n", "0"):
            return False
        else:
            raise argparse.ArgumentTypeError("Boolean value expected.")

    parser = argparse.ArgumentParser(**parser_kwargs)
    parser.add_argument(
        "-n",
        "--name",
        type=str,
        const=True,
        default="",
        nargs="?",
        help="postfix for logdir",
    )
    parser.add_argument(
        "-r",
        "--resume",
        type=str,
        const=True,
        default="",
        nargs="?",
        help="resume from logdir or checkpoint in logdir",
    )
    parser.add_argument(
        "-b",
        "--base",
        nargs="*",
        metavar="base_config.yaml",
        help="paths to base configs. Loaded from left-to-right. "
        "Parameters can be overwritten or added with command-line options of the form `--key value`.",
        default=list(),
    )
    parser.add_argument(
        "-t",
        "--train",
        type=str2bool,
        const=True,
        default=False,
        nargs="?",
        help="train",
    )
    parser.add_argument(
        "--no-test",
        type=str2bool,
        const=True,
        default=False,
        nargs="?",
        help="disable test",
    )
    parser.add_argument(
        "-p",
        "--project",
        help="name of new or path to existing project"
    )
    parser.add_argument(
        "-d",
        "--debug",
        type=str2bool,
        nargs="?",
        const=True,
        default=False,
        help="enable post-mortem debugging",
    )
    parser.add_argument(
        "-s",
        "--seed",
        type=int,
        default=23,
        help="seed for seed_everything",
    )
    parser.add_argument(
        "-f",
        "--postfix",
        type=str,
        default="",
        help="post-postfix for default name",
    )
    parser.add_argument(
        "-l",
        "--logdir",
        type=str,
        default="logs",
        help="directory for logging dat shit",
    )
    parser.add_argument(
        "--scale_lr",
        action="store_true",
        default=False,
        help="scale base-lr by ngpu * batch_size * n_accumulate",
    )
    return parser
153,568 | import argparse, os, sys, datetime, glob
import numpy as np
import time
import torch
import torchvision
import pytorch_lightning as pl
import json
import pickle
from packaging import version
from omegaconf import OmegaConf
from torch.utils.data import DataLoader, Dataset
from functools import partial
from PIL import Image
import torch.distributed as dist
from pytorch_lightning import seed_everything
from pytorch_lightning.trainer import Trainer
from pytorch_lightning.callbacks import ModelCheckpoint, Callback, LearningRateMonitor
from pytorch_lightning.utilities.distributed import rank_zero_only
from pytorch_lightning.utilities import rank_zero_info
from pytorch_lightning.plugins import DDPPlugin
from ldm.data.base import Txt2ImgIterableBaseDataset
from ldm.util import instantiate_from_config
def nondefault_trainer_args(opt):
    """Return the sorted names of Trainer CLI options whose value on `opt`
    differs from the argparse default."""
    defaults_parser = Trainer.add_argparse_args(argparse.ArgumentParser())
    defaults = defaults_parser.parse_args([])
    return sorted(name for name in vars(defaults) if getattr(opt, name) != getattr(defaults, name))
153,569 | import argparse, os, sys, datetime, glob
import numpy as np
import time
import torch
import torchvision
import pytorch_lightning as pl
import json
import pickle
from packaging import version
from omegaconf import OmegaConf
from torch.utils.data import DataLoader, Dataset
from functools import partial
from PIL import Image
import torch.distributed as dist
from pytorch_lightning import seed_everything
from pytorch_lightning.trainer import Trainer
from pytorch_lightning.callbacks import ModelCheckpoint, Callback, LearningRateMonitor
from pytorch_lightning.utilities.distributed import rank_zero_only
from pytorch_lightning.utilities import rank_zero_info
from pytorch_lightning.plugins import DDPPlugin
from ldm.data.base import Txt2ImgIterableBaseDataset
from ldm.util import instantiate_from_config
class Txt2ImgIterableBaseDataset(IterableDataset):
    """Interface base class that makes text2img IterableDatasets chainable."""

    def __init__(self, num_records=0, valid_ids=None, size=256):
        super().__init__()
        self.size = size
        self.num_records = num_records
        self.valid_ids = valid_ids
        # sample_ids starts as the full id list; a DataLoader worker-init hook
        # may later narrow it to the worker's shard.
        self.sample_ids = valid_ids

        print(f'{self.__class__.__name__} dataset contains {self.__len__()} examples.')

    def __len__(self):
        return self.num_records

    def __iter__(self):
        # Subclasses must implement iteration; the base class is an interface only.
        pass
def worker_init_fn(_):
    """DataLoader worker-init hook: give each worker a disjoint shard of the
    sample ids (for iterable datasets) and decorrelate its numpy RNG."""
    worker_info = torch.utils.data.get_worker_info()

    dataset = worker_info.dataset
    worker_id = worker_info.id

    if isinstance(dataset, Txt2ImgIterableBaseDataset):
        # Evenly shard the ids; any remainder records are dropped.
        split_size = dataset.num_records // worker_info.num_workers
        # reset num_records to the true number to retain reliable length information
        dataset.sample_ids = dataset.valid_ids[worker_id * split_size:(worker_id + 1) * split_size]
        # Pick a random word of the inherited RNG state and offset it by the
        # worker id so each worker draws a different stream.
        current_id = np.random.choice(len(np.random.get_state()[1]), 1)
        return np.random.seed(np.random.get_state()[1][current_id] + worker_id)
    else:
        return np.random.seed(np.random.get_state()[1][0] + worker_id)
153,570 | import argparse, os, sys, datetime, glob
import numpy as np
import time
import torch
import torchvision
import pytorch_lightning as pl
import json
import pickle
from packaging import version
from omegaconf import OmegaConf
from torch.utils.data import DataLoader, Dataset
from functools import partial
from PIL import Image
import torch.distributed as dist
from pytorch_lightning import seed_everything
from pytorch_lightning.trainer import Trainer
from pytorch_lightning.callbacks import ModelCheckpoint, Callback, LearningRateMonitor
from pytorch_lightning.utilities.distributed import rank_zero_only
from pytorch_lightning.utilities import rank_zero_info
from pytorch_lightning.plugins import DDPPlugin
from ldm.data.base import Txt2ImgIterableBaseDataset
from ldm.util import instantiate_from_config
def get_world_size():
    """Number of participating ranks; 1 when torch.distributed is unavailable
    or not initialized."""
    if dist.is_available() and dist.is_initialized():
        return dist.get_world_size()
    return 1
The provided code snippet includes necessary dependencies for implementing the `all_gather` function. Write a Python function `def all_gather(data)` to solve the following problem:
Run all_gather on arbitrary picklable data (not necessarily tensors) Args: data: any picklable object Returns: list[data]: list of data gathered from each rank
Here is the function:
def all_gather(data):
    """
    Run all_gather on arbitrary picklable data (not necessarily tensors)
    Args:
        data: any picklable object
    Returns:
        list[data]: list of data gathered from each rank
    """
    world_size = get_world_size()
    if world_size == 1:
        # Single process: nothing to gather.
        return [data]

    # serialized to a Tensor
    origin_size = None
    if not isinstance(data, torch.Tensor):
        # Non-tensor payloads are pickled into a byte tensor.
        buffer = pickle.dumps(data)
        storage = torch.ByteStorage.from_buffer(buffer)
        tensor = torch.ByteTensor(storage).to("cuda")
    else:
        # Tensor payloads are flattened; origin_size remembers the shape.
        origin_size = data.size()
        tensor = data.reshape(-1)

    tensor_type = tensor.dtype

    # obtain Tensor size of each rank
    local_size = torch.LongTensor([tensor.numel()]).to("cuda")
    size_list = [torch.LongTensor([0]).to("cuda") for _ in range(world_size)]
    dist.all_gather(size_list, local_size)
    size_list = [int(size.item()) for size in size_list]
    max_size = max(size_list)

    # receiving Tensor from all ranks
    # we pad the tensor because torch all_gather does not support
    # gathering tensors of different shapes
    tensor_list = []
    for _ in size_list:
        tensor_list.append(torch.FloatTensor(size=(max_size,)).cuda().to(tensor_type))
    if local_size != max_size:
        padding = torch.FloatTensor(size=(max_size - local_size,)).cuda().to(tensor_type)
        tensor = torch.cat((tensor, padding), dim=0)
    dist.all_gather(tensor_list, tensor)

    data_list = []
    for size, tensor in zip(size_list, tensor_list):
        if origin_size is None:
            # Non-tensor payload: strip padding and unpickle.
            # NOTE(review): pickle.loads on gathered bytes — only safe because
            # all ranks run the same trusted program.
            buffer = tensor.cpu().numpy().tobytes()[:size]
            data_list.append(pickle.loads(buffer))
        else:
            # Tensor payload: keep the truncated flat tensor for reshaping below.
            buffer = tensor[:size]
            data_list.append(buffer)

    if origin_size is not None:
        # Restore the original shape, allowing the first dim to differ per rank.
        new_shape = [-1] + list(origin_size[1:])
        resized_list = []
        for data in data_list:
            # suppose the difference of tensor size exist in first dimension
            data = data.reshape(new_shape)
            resized_list.append(data)
        return resized_list
    else:
        return data_list
153,571 | import argparse, os, sys, datetime, glob
import numpy as np
import time
import torch
import torchvision
import pytorch_lightning as pl
import json
import pickle
from packaging import version
from omegaconf import OmegaConf
from torch.utils.data import DataLoader, Dataset
from functools import partial
from PIL import Image
import torch.distributed as dist
from pytorch_lightning import seed_everything
from pytorch_lightning.trainer import Trainer
from pytorch_lightning.callbacks import ModelCheckpoint, Callback, LearningRateMonitor
from pytorch_lightning.utilities.distributed import rank_zero_only
from pytorch_lightning.utilities import rank_zero_info
from pytorch_lightning.plugins import DDPPlugin
from ldm.data.base import Txt2ImgIterableBaseDataset
from ldm.util import instantiate_from_config
def melk(*args, **kwargs):
    """Signal-handler hook: save a `last.ckpt` checkpoint from rank 0.

    NOTE(review): relies on `trainer` and `ckptdir` from the enclosing scope.
    """
    # run all checkpoint hooks
    if trainer.global_rank == 0:
        print("Summoning checkpoint.")
        ckpt_path = os.path.join(ckptdir, "last.ckpt")
        trainer.save_checkpoint(ckpt_path)
153,572 | import argparse, os, sys, datetime, glob
import numpy as np
import time
import torch
import torchvision
import pytorch_lightning as pl
import json
import pickle
from packaging import version
from omegaconf import OmegaConf
from torch.utils.data import DataLoader, Dataset
from functools import partial
from PIL import Image
import torch.distributed as dist
from pytorch_lightning import seed_everything
from pytorch_lightning.trainer import Trainer
from pytorch_lightning.callbacks import ModelCheckpoint, Callback, LearningRateMonitor
from pytorch_lightning.utilities.distributed import rank_zero_only
from pytorch_lightning.utilities import rank_zero_info
from pytorch_lightning.plugins import DDPPlugin
from ldm.data.base import Txt2ImgIterableBaseDataset
from ldm.util import instantiate_from_config
def divein(*args, **kwargs):
    """Signal-handler hook: drop rank 0 into the pudb debugger.

    NOTE(review): relies on `trainer` from the enclosing scope.
    """
    if trainer.global_rank == 0:
        import pudb;
        pudb.set_trace()
153,573 | from __future__ import annotations
import math
import random
import sys
from argparse import ArgumentParser
import einops
import k_diffusion as K
import numpy as np
import torch
import torch.nn as nn
from tqdm.auto import tqdm
from einops import rearrange
from omegaconf import OmegaConf
from PIL import Image, ImageOps
from torch import autocast
import json
import matplotlib.pyplot as plt
import seaborn
from pathlib import Path
from clip_similarity import ClipSimilarity
from edit_dataset import EditDatasetEval
from ldm.util import instantiate_from_config
def instantiate_from_config(config):
    """Instantiate `config["target"]` with `config["params"]` as kwargs; the
    sentinel strings '__is_first_stage__' / '__is_unconditional__' map to None."""
    if not "target" in config:
        if config == '__is_first_stage__':
            return None
        elif config == "__is_unconditional__":
            return None
        raise KeyError("Expected key `target` to instantiate.")
    return get_obj_from_str(config["target"])(**config.get("params", dict()))
def load_model_from_config(config, ckpt, vae_ckpt=None, verbose=False):
    """Instantiate the model described by `config.model` and load weights from
    the checkpoint at `ckpt`; optionally overwrite first-stage (VAE) weights
    from `vae_ckpt`. Returns the model; the state dict is loaded non-strictly."""
    print(f"Loading model from {ckpt}")
    pl_sd = torch.load(ckpt, map_location="cpu")
    if "global_step" in pl_sd:
        print(f"Global Step: {pl_sd['global_step']}")
    sd = pl_sd["state_dict"]
    if vae_ckpt is not None:
        print(f"Loading VAE from {vae_ckpt}")
        vae_sd = torch.load(vae_ckpt, map_location="cpu")["state_dict"]
        # Swap in the VAE weights for every `first_stage_model.*` key.
        sd = {
            k: vae_sd[k[len("first_stage_model.") :]] if k.startswith("first_stage_model.") else v
            for k, v in sd.items()
        }
    model = instantiate_from_config(config.model)
    # strict=False tolerates missing/unexpected keys; they are reported below.
    m, u = model.load_state_dict(sd, strict=False)
    if len(m) > 0 and verbose:
        print("missing keys:")
        print(m)
    if len(u) > 0 and verbose:
        print("unexpected keys:")
        print(u)
    return model
153,574 | from __future__ import annotations
import math
import random
import sys
from argparse import ArgumentParser
import einops
import k_diffusion as K
import numpy as np
import torch
import torch.nn as nn
from tqdm.auto import tqdm
from einops import rearrange
from omegaconf import OmegaConf
from PIL import Image, ImageOps
from torch import autocast
import json
import matplotlib.pyplot as plt
import seaborn
from pathlib import Path
from clip_similarity import ClipSimilarity
from edit_dataset import EditDatasetEval
from ldm.util import instantiate_from_config
class ImageEditor(nn.Module):
def __init__(self, config, ckpt, vae_ckpt=None):
def forward(
self,
image: torch.Tensor,
edit: str,
scale_txt: float = 7.5,
scale_img: float = 1.0,
steps: int = 100,
) -> torch.Tensor:
class ClipSimilarity(nn.Module):
def __init__(self, name: str = "ViT-L/14"):
def encode_text(self, text: list[str]) -> torch.Tensor:
def encode_image(self, image: torch.Tensor) -> torch.Tensor:
def forward(
self, image_0: torch.Tensor, image_1: torch.Tensor, text_0: list[str], text_1: list[str]
) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
class EditDatasetEval(Dataset):
def __init__(
self,
path: str,
split: str = "train",
splits: tuple[float, float, float] = (0.9, 0.05, 0.05),
res: int = 256,
):
def __len__(self) -> int:
def __getitem__(self, i: int) -> dict[str, Any]:
def compute_metrics(config,
                    model_path,
                    vae_ckpt,
                    data_path,
                    output_path,
                    scales_img,
                    scales_txt,
                    num_samples = 5000,
                    split = "test",
                    steps = 50,
                    res = 512,
                    seed = 0):
    """Evaluate the image editor over a grid of (text scale, image scale)
    settings and append per-setting average CLIP metrics to a JSONL file.

    Returns the path of the JSONL file the results were appended to.
    """
    editor = ImageEditor(config, model_path, vae_ckpt).cuda()
    clip_similarity = ClipSimilarity().cuda()
    outpath = Path(output_path, f"n={num_samples}_p={split}_s={steps}_r={res}_e={seed}.jsonl")
    Path(output_path).mkdir(parents=True, exist_ok=True)

    for scale_txt in scales_txt:
        for scale_img in scales_img:
            dataset = EditDatasetEval(
                path=data_path,
                split=split,
                res=res
            )
            assert num_samples <= len(dataset)
            print(f'Processing t={scale_txt}, i={scale_img}')
            # Fixed seed so each (scale_txt, scale_img) cell sees the same sample order.
            torch.manual_seed(seed)
            perm = torch.randperm(len(dataset))
            i = 0
            sim_0_avg = 0
            sim_1_avg = 0
            sim_direction_avg = 0
            sim_image_avg = 0
            count = 0  # single counter (the original initialized it twice)
            pbar = tqdm(total=num_samples)
            while count < num_samples:
                idx = perm[i].item()
                sample = dataset[idx]
                i += 1
                gen = editor(sample["image_0"].cuda(), sample["edit"], scale_txt=scale_txt, scale_img=scale_img, steps=steps)
                sim_0, sim_1, sim_direction, sim_image = clip_similarity(
                    sample["image_0"][None].cuda(), gen[None].cuda(), [sample["input_prompt"]], [sample["output_prompt"]]
                )
                sim_0_avg += sim_0.item()
                sim_1_avg += sim_1.item()
                sim_direction_avg += sim_direction.item()
                sim_image_avg += sim_image.item()
                count += 1
                # BUG FIX: advance by the single sample just processed; the
                # original passed the running `count`, so the bar advanced
                # 1+2+...+n steps (quadratically) instead of n.
                pbar.update(1)
            pbar.close()
            sim_0_avg /= count
            sim_1_avg /= count
            sim_direction_avg /= count
            sim_image_avg /= count
            with open(outpath, "a") as f:
                f.write(f"{json.dumps(dict(sim_0=sim_0_avg, sim_1=sim_1_avg, sim_direction=sim_direction_avg, sim_image=sim_image_avg, num_samples=num_samples, split=split, scale_txt=scale_txt, scale_img=scale_img, steps=steps, res=res, seed=seed))}\n")
    return outpath
153,575 | from __future__ import annotations
import math
import random
import sys
from argparse import ArgumentParser
import einops
import k_diffusion as K
import numpy as np
import torch
import torch.nn as nn
from tqdm.auto import tqdm
from einops import rearrange
from omegaconf import OmegaConf
from PIL import Image, ImageOps
from torch import autocast
import json
import matplotlib.pyplot as plt
import seaborn
from pathlib import Path
from clip_similarity import ClipSimilarity
from edit_dataset import EditDatasetEval
from ldm.util import instantiate_from_config
def plot_metrics(metrics_file, output_path):
    """Plot CLIP text-image direction similarity against CLIP image similarity
    from a JSONL metrics file and save the figure as plot.pdf in `output_path`."""
    with open(metrics_file, 'r') as f:
        records = [json.loads(line) for line in f]

    plt.rcParams.update({'font.size': 11.5})
    seaborn.set_style("darkgrid")
    plt.figure(figsize=(20.5* 0.7, 10.8* 0.7), dpi=200)

    xs = [record["sim_direction"] for record in records]
    ys = [record["sim_image"] for record in records]
    plt.plot(xs, ys, marker='o', linewidth=2, markersize=4)

    plt.xlabel("CLIP Text-Image Direction Similarity", labelpad=10)
    plt.ylabel("CLIP Image Similarity", labelpad=10)
    plt.savefig(Path(output_path) / Path("plot.pdf"), bbox_inches="tight")
153,576 | from __future__ import annotations
import math
import random
import sys
from argparse import ArgumentParser
import einops
import k_diffusion as K
import numpy as np
import torch
import torch.nn as nn
from einops import rearrange
from omegaconf import OmegaConf
from PIL import Image, ImageOps
from torch import autocast
from stable_diffusion.ldm.util import instantiate_from_config
def instantiate_from_config(config):
    """Instantiate `config["target"]` with `config["params"]` as kwargs; the
    sentinel strings '__is_first_stage__' / '__is_unconditional__' map to None."""
    if not "target" in config:
        if config == '__is_first_stage__':
            return None
        elif config == "__is_unconditional__":
            return None
        raise KeyError("Expected key `target` to instantiate.")
    return get_obj_from_str(config["target"])(**config.get("params", dict()))
def load_model_from_config(config, ckpt, vae_ckpt=None, verbose=False):
    """Instantiate the model described by `config.model` and load weights from
    the checkpoint at `ckpt`; optionally overwrite first-stage (VAE) weights
    from `vae_ckpt`. Returns the model; the state dict is loaded non-strictly."""
    print(f"Loading model from {ckpt}")
    pl_sd = torch.load(ckpt, map_location="cpu")
    if "global_step" in pl_sd:
        print(f"Global Step: {pl_sd['global_step']}")
    sd = pl_sd["state_dict"]
    if vae_ckpt is not None:
        print(f"Loading VAE from {vae_ckpt}")
        vae_sd = torch.load(vae_ckpt, map_location="cpu")["state_dict"]
        # Swap in the VAE weights for every `first_stage_model.*` key.
        sd = {
            k: vae_sd[k[len("first_stage_model.") :]] if k.startswith("first_stage_model.") else v
            for k, v in sd.items()
        }
    model = instantiate_from_config(config.model)
    # strict=False tolerates missing/unexpected keys; they are reported below.
    m, u = model.load_state_dict(sd, strict=False)
    if len(m) > 0 and verbose:
        print("missing keys:")
        print(m)
    if len(u) > 0 and verbose:
        print("unexpected keys:")
        print(u)
    return model
153,577 | from __future__ import annotations
import math
import random
import sys
from argparse import ArgumentParser
import einops
import gradio as gr
import k_diffusion as K
import numpy as np
import torch
import torch.nn as nn
from einops import rearrange
from omegaconf import OmegaConf
from PIL import Image, ImageOps
from torch import autocast
from stable_diffusion.ldm.util import instantiate_from_config
def instantiate_from_config(config):
def load_model_from_config(config, ckpt, vae_ckpt=None, verbose=False):
    """Instantiate the model described by ``config`` and load ``ckpt`` weights.

    If ``vae_ckpt`` is given, first-stage (VAE) weights are substituted from
    that checkpoint. Loading is non-strict; mismatched keys are printed when
    ``verbose`` is set.
    """
    print(f"Loading model from {ckpt}")
    pl_sd = torch.load(ckpt, map_location="cpu")
    if "global_step" in pl_sd:
        print(f"Global Step: {pl_sd['global_step']}")
    sd = pl_sd["state_dict"]
    if vae_ckpt is not None:
        print(f"Loading VAE from {vae_ckpt}")
        vae_sd = torch.load(vae_ckpt, map_location="cpu")["state_dict"]
        # Swap every first_stage_model.* entry for the matching VAE weight;
        # all other entries are kept as-is.
        sd = {
            k: vae_sd[k[len("first_stage_model.") :]] if k.startswith("first_stage_model.") else v
            for k, v in sd.items()
        }
    model = instantiate_from_config(config.model)
    # strict=False: tolerate missing/unexpected keys (reported below).
    m, u = model.load_state_dict(sd, strict=False)
    if len(m) > 0 and verbose:
        print("missing keys:")
        print(m)
    if len(u) > 0 and verbose:
        print("unexpected keys:")
        print(u)
    return model
153,578 | import os
import torch
import platform
import subprocess
from colorama import Fore, Style
from tempfile import NamedTemporaryFile
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.generation.utils import GenerationConfig
def init_model():
    """Load Baichuan-13B-Chat (fp16, auto device placement), its generation
    config, and tokenizer; return ``(model, tokenizer)``."""
    print("init model ...")
    repo_id = "baichuan-inc/Baichuan-13B-Chat"
    model = AutoModelForCausalLM.from_pretrained(
        repo_id,
        torch_dtype=torch.float16,
        device_map="auto",
        trust_remote_code=True,
    )
    model.generation_config = GenerationConfig.from_pretrained(repo_id)
    tokenizer = AutoTokenizer.from_pretrained(
        repo_id,
        use_fast=False,
        trust_remote_code=True,
    )
    return model, tokenizer
153,579 | import os
import torch
import platform
import subprocess
from colorama import Fore, Style
from tempfile import NamedTemporaryFile
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.generation.utils import GenerationConfig
def clear_screen():
    """Wipe the terminal, reprint the colored welcome banner, and return a
    fresh (empty) message history."""
    command = "cls" if platform.system() == "Windows" else "clear"
    os.system(command)
    print(Fore.YELLOW + Style.BRIGHT + "欢迎使用百川大模型,输入进行对话,vim 多行输入,clear 清空历史,CTRL+C 中断生成,stream 开关流式生成,exit 结束。")
    return []
153,580 | import os
import torch
import platform
import subprocess
from colorama import Fore, Style
from tempfile import NamedTemporaryFile
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.generation.utils import GenerationConfig
def vim_input():
    """Collect multi-line user input by opening vim on a temporary file.

    Returns:
        The file's full contents after the editor exits.

    The original closed a default (delete-on-close) NamedTemporaryFile
    before launching vim — on POSIX that unlinks it immediately, and the
    file vim then recreates was never removed; it also leaked the handle
    from a bare ``open().read()``. ``delete=False`` plus explicit cleanup
    fixes both.
    """
    with NamedTemporaryFile(delete=False) as tmp:
        path = tmp.name
    try:
        # '+star' opens vim directly in insert mode.
        subprocess.call(['vim', '+star', path])
        with open(path) as f:
            text = f.read()
    finally:
        os.remove(path)
    return text
153,581 | import json
import torch
import streamlit as st
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.generation.utils import GenerationConfig
def init_model():
    """Load Baichuan-13B-Chat (fp16, auto device map), attach its generation
    config, and return ``(model, tokenizer)``."""
    model = AutoModelForCausalLM.from_pretrained(
        "baichuan-inc/Baichuan-13B-Chat",
        torch_dtype=torch.float16,  # halve memory vs fp32
        device_map="auto",  # shard across available devices
        trust_remote_code=True
    )
    model.generation_config = GenerationConfig.from_pretrained(
        "baichuan-inc/Baichuan-13B-Chat"
    )
    tokenizer = AutoTokenizer.from_pretrained(
        "baichuan-inc/Baichuan-13B-Chat",
        use_fast=False,
        trust_remote_code=True
    )
    return model, tokenizer
153,582 | import json
import torch
import streamlit as st
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.generation.utils import GenerationConfig
st.set_page_config(page_title="Baichuan-13B-Chat")
st.title("Baichuan-13B-Chat")
def clear_chat_history():
    """Drop the stored conversation so the next rerun starts fresh.

    Uses ``pop`` with a default instead of ``del`` so that clearing before
    any message exists (or clearing twice) does not raise.
    """
    st.session_state.pop("messages", None)
153,583 | import json
import torch
import streamlit as st
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.generation.utils import GenerationConfig
st.set_page_config(page_title="Baichuan-13B-Chat")
st.title("Baichuan-13B-Chat")
def init_chat_history():
    """Render the assistant greeting plus any prior messages, creating the
    message list on first run; return the history list."""
    with st.chat_message("assistant", avatar='🤖'):
        st.markdown("您好,我是百川大模型,很高兴为您服务🥰")

    if "messages" not in st.session_state:
        st.session_state.messages = []
    else:
        # Replay the stored conversation with role-specific avatars.
        for message in st.session_state.messages:
            avatar = '🤖' if message["role"] != "user" else '🧑💻'
            with st.chat_message(message["role"], avatar=avatar):
                st.markdown(message["content"])
    return st.session_state.messages
153,584 | import math
import time
from torch import nn, optim
from torch.optim import Adam
from data import *
from models.model.transformer import Transformer
from util.bleu import idx_to_word, get_bleu
from util.epoch_timer import epoch_time
def count_parameters(model):
    """Return the number of trainable (requires_grad) scalar parameters."""
    total = 0
    for param in model.parameters():
        if param.requires_grad:
            total += param.numel()
    return total
153,585 | import math
import time
from torch import nn, optim
from torch.optim import Adam
from data import *
from models.model.transformer import Transformer
from util.bleu import idx_to_word, get_bleu
from util.epoch_timer import epoch_time
def initialize_weights(m):
    """He (Kaiming) uniform init for every weight tensor with dim > 1.

    Bias vectors and 1-D weights (e.g. LayerNorm) are left untouched.
    Intended for use with ``model.apply(initialize_weights)``.
    """
    if hasattr(m, 'weight') and m.weight.dim() > 1:
        # kaiming_uniform_ is the non-deprecated, in-place spelling of the
        # deprecated nn.init.kaiming_uniform used before.
        nn.init.kaiming_uniform_(m.weight.data)
153,586 | import math
import time
from torch import nn, optim
from torch.optim import Adam
from data import *
from models.model.transformer import Transformer
from util.bleu import idx_to_word, get_bleu
from util.epoch_timer import epoch_time
# Build the seq2seq Transformer; hyper-parameters and the vocab/pad indices
# come from the star import of `data` above.
model = Transformer(src_pad_idx=src_pad_idx,
                    trg_pad_idx=trg_pad_idx,
                    trg_sos_idx=trg_sos_idx,
                    d_model=d_model,
                    enc_voc_size=enc_voc_size,
                    dec_voc_size=dec_voc_size,
                    max_len=max_len,
                    ffn_hidden=ffn_hidden,
                    n_head=n_heads,
                    n_layers=n_layers,
                    drop_prob=drop_prob,
                    device=device).to(device)

print(f'The model has {count_parameters(model):,} trainable parameters')
model.apply(initialize_weights)

# Adam optimizer; LR is decayed on validation-loss plateaus by the scheduler.
optimizer = Adam(params=model.parameters(),
                 lr=init_lr,
                 weight_decay=weight_decay,
                 eps=adam_eps)

scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer=optimizer,
                                                 verbose=True,
                                                 factor=factor,
                                                 patience=patience)

# NOTE(review): the loss masks `src_pad_idx`, but CrossEntropyLoss targets are
# in the *target* vocabulary — confirm whether `trg_pad_idx` was intended.
criterion = nn.CrossEntropyLoss(ignore_index=src_pad_idx)
def train(model, iterator, optimizer, criterion, clip):
def evaluate(model, iterator, criterion):
def epoch_time(start_time, end_time):
def run(total_epoch, best_loss):
    """Train/evaluate for ``total_epoch`` epochs, checkpointing on best val loss.

    Per epoch: trains, evaluates (loss + BLEU), steps the plateau LR scheduler
    once warmup has passed, overwrites the metric-history files in ``result/``,
    and prints a summary. ``best_loss`` seeds the best-validation-loss tracker.
    Relies on the module-level model/optimizer/scheduler/criterion and the
    ``train``/``evaluate``/``epoch_time`` helpers defined in this file.
    """
    train_losses, test_losses, bleus = [], [], []
    for step in range(total_epoch):
        start_time = time.time()
        train_loss = train(model, train_iter, optimizer, criterion, clip)
        valid_loss, bleu = evaluate(model, valid_iter, criterion)
        end_time = time.time()

        # Only adapt the LR after the warmup epochs have passed.
        if step > warmup:
            scheduler.step(valid_loss)

        train_losses.append(train_loss)
        test_losses.append(valid_loss)
        bleus.append(bleu)
        epoch_mins, epoch_secs = epoch_time(start_time, end_time)

        if valid_loss < best_loss:
            best_loss = valid_loss
            torch.save(model.state_dict(), 'saved/model-{0}.pt'.format(valid_loss))

        # Persist full metric histories every epoch (overwrite in place);
        # `with` guarantees the handles are closed even if a write fails.
        with open('result/train_loss.txt', 'w') as f:
            f.write(str(train_losses))
        with open('result/bleu.txt', 'w') as f:
            f.write(str(bleus))
        with open('result/test_loss.txt', 'w') as f:
            f.write(str(test_losses))

        print(f'Epoch: {step + 1} | Time: {epoch_mins}m {epoch_secs}s')
        print(f'\tTrain Loss: {train_loss:.3f} | Train PPL: {math.exp(train_loss):7.3f}')
        print(f'\tVal Loss: {valid_loss:.3f} | Val PPL: {math.exp(valid_loss):7.3f}')
        print(f'\tBLEU Score: {bleu:.3f}')
153,587 | import matplotlib.pyplot as plt
import re
def read(name):
def draw(mode):
    """Plot stored training curves; ``mode`` selects 'loss' or 'bleu'."""
    if mode == 'loss':
        plt.plot(read('./result/train_loss.txt'), 'r', label='train')
        plt.plot(read('./result/test_loss.txt'), 'b', label='validation')
        plt.legend(loc='lower left')
    elif mode == 'bleu':
        plt.plot(read('./result/bleu.txt'), 'b', label='bleu score')
        plt.legend(loc='lower right')

    plt.xlabel('epoch')
    plt.ylabel(mode)
    plt.title('training result')
    plt.grid(True, which='both', axis='both')
    plt.show()
153,588 | import json
import PIL
import gradio as gr
import numpy as np
from gradio import processing_utils
from packaging import version
from PIL import Image
from caption_anything.model import CaptionAnything
from caption_anything.utils.image_editing_utils import create_bubble_frame
from caption_anything.utils.utils import mask_painter, seg_model_map, prepare_segmenter, is_platform_win
from caption_anything.utils.parser import parse_augment
from caption_anything.captioner import build_captioner
from caption_anything.text_refiner import build_text_refiner
from caption_anything.segmenter import build_segmenter
from caption_anything.utils.chatbot import ConversationBot, build_chatbot_tools, get_new_image_name
from segment_anything import sam_model_registry
class ImageSketcher(gr.Image):
    """
    Fix the bug of gradio.Image that cannot upload with tool == 'sketch'.
    """
    is_template = True  # Magic to make this work with gradio.Block, don't remove unless you know what you're doing.

    def __init__(self, **kwargs):
        # Always operate in sketch mode.
        super().__init__(tool="sketch", **kwargs)

    def preprocess(self, x):
        """Supply an empty (alpha-only) mask when the user uploaded an image
        without drawing, so the base-class sketch preprocessing never sees a
        missing mask."""
        if self.tool == 'sketch' and self.source in ["upload", "webcam"]:
            assert isinstance(x, dict)
            if x['mask'] is None:
                decode_image = processing_utils.decode_base64_to_image(x['image'])
                width, height = decode_image.size
                # RGBA mask of zeros with an opaque alpha channel = no strokes.
                mask = np.zeros((height, width, 4), dtype=np.uint8)
                mask[..., -1] = 255
                mask = self.postprocess(mask)
                x['mask'] = mask
        return super().preprocess(x)
def init_openai_api_key(api_key=""):
    """Try to build the GPT text refiner from ``api_key``.

    Returns gradio visibility updates for the GPT-dependent / GPT-independent
    UI sections plus the refiner instance (``None`` when the key is absent or
    invalid).
    """
    text_refiner = None
    # Real OpenAI keys are long; skip the network round-trip for short junk.
    if api_key and len(api_key) > 30:
        try:
            text_refiner = build_text_refiner(args.text_refiner, args.device, args, api_key)
            text_refiner.llm('hi')  # smoke-test the key with a tiny request
        except Exception:
            # Was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit; Exception is the right scope here.
            text_refiner = None
    openai_available = text_refiner is not None
    return gr.update(visible=openai_available), gr.update(visible=openai_available), gr.update(
        visible=openai_available), gr.update(visible=True), gr.update(visible=True), gr.update(
        visible=True), text_refiner
def chat_with_points(chat_input, click_state, chat_state, state, text_refiner, img_caption):
    """Answer ``chat_input`` about the image, grounded on clicked points.

    Builds an LLM prompt from the whole-image caption, the captions of all
    positively-labelled clicks, and as much recent Q/A history as fits under
    a length budget, then queries the text refiner's LLM.

    Returns:
        ``(state, state, chat_state)`` — the gradio chatbot history (twice,
        for the two bound outputs) and the raw Q/A history.
    """
    if text_refiner is None:
        # Fixed typo in the user-facing message ("initilzed").
        response = "Text refiner is not initialized, please input openai api key."
        state = state + [(chat_input, response)]
        return state, state, chat_state

    points, labels, captions = click_state
    suffix = '\nHuman: {chat_input}\nAI: '
    qa_template = '\nHuman: {q}\nAI: {a}'
    # Grammar of the system prompt cleaned up ("greate", "your provide").
    point_chat_prompt = "I am an AI trained to chat with you about an image. I am great at telling what is going on in any image based on the image information you provide. The overall image description is \"{img_caption}\". You will also provide me objects in the image in details, i.e., their location and visual descriptions. Here are the locations and descriptions of events that happen in the image: {points_with_caps} \nYou are required to use language instead of number to describe these positions. Now, let's chat!"

    # Accumulate visual context from positively-labelled clicks only.
    prev_visual_context = ""
    pos_points = []
    pos_captions = []
    for point, label, caption in zip(points, labels, captions):
        if label == 1:
            pos_points.append(f"(X:{point[0]}, Y:{point[1]})")
            pos_captions.append(caption)
            prev_visual_context = prev_visual_context + '\n' + 'There is an event described as \"{}\" locating at {}'.format(
                pos_captions[-1], ', '.join(pos_points))

    # Keep only as much recent history as fits under the length budget.
    context_length_thres = 500
    prev_history = ""
    for q, a in chat_state:
        if len(prev_history) >= context_length_thres:
            break
        prev_history = prev_history + qa_template.format(**{"q": q, "a": a})

    chat_prompt = point_chat_prompt.format(
        **{"img_caption": img_caption, "points_with_caps": prev_visual_context}) + prev_history + suffix.format(
        **{"chat_input": chat_input})
    print('\nchat_prompt: ', chat_prompt)
    response = text_refiner.llm(chat_prompt)
    state = state + [(chat_input, response)]
    chat_state = chat_state + [(chat_input, response)]
    return state, state, chat_state
def upload_callback(image_input, state):
    """Handle a newly uploaded image: reset per-image state, downscale large
    inputs, precompute the SAM embedding, and caption the whole image.

    Returns the reset chat/click state plus the (possibly resized) image,
    its SAM embedding, the original/input sizes, and the global caption.
    """
    if isinstance(image_input, dict):  # if upload from sketcher_input, input contains image and mask
        image_input, mask = image_input['image'], image_input['mask']
    chat_state = []
    click_state = [[], [], []]
    res = 1024
    width, height = image_input.size
    ratio = min(1.0 * res / max(width, height), 1.0)
    if ratio < 1.0:
        # Keep the longer side at most `res` px, preserving aspect ratio.
        image_input = image_input.resize((int(width * ratio), int(height * ratio)))
        print('Scaling input image to {}'.format(image_input.size))
    state = [] + [(None, 'Image size: ' + str(image_input.size))]
    # NOTE(review): relies on module-level `args`, `shared_captioner`,
    # `shared_sam_model` and `iface` defined elsewhere in this file.
    model = build_caption_anything_with_models(
        args,
        api_key="",
        captioner=shared_captioner,
        sam_model=shared_sam_model,
        session_id=iface.app_id
    )
    # Precompute the segmenter embedding once so clicks are fast afterwards.
    model.segmenter.set_image(image_input)
    image_embedding = model.image_embedding
    original_size = model.original_size
    input_size = model.input_size
    img_caption = model.captioner.inference_seg(image_input)['caption']
    return state, state, chat_state, image_input, click_state, image_input, image_input, image_embedding, \
        original_size, input_size, img_caption
def inference_click(image_input, point_prompt, click_mode, enable_wiki, language, sentiment, factuality,
                    length, image_embedding, state, click_state, original_size, input_size, text_refiner,
                    evt: gr.SelectData):
    """Caption the object under a clicked point. Generator: first yield is
    the raw caption with the painted mask/bubble; a second yield (only when
    GPT is enabled) carries the refined caption and wiki text.
    """
    click_index = evt.index
    # Encode the click as "[[x, y, label]]" — label 1 = positive, 0 = negative.
    if point_prompt == 'Positive':
        coordinate = "[[{}, {}, 1]]".format(str(click_index[0]), str(click_index[1]))
    else:
        coordinate = "[[{}, {}, 0]]".format(str(click_index[0]), str(click_index[1]))
    prompt = get_click_prompt(coordinate, click_state, click_mode)
    input_points = prompt['input_point']
    input_labels = prompt['input_label']

    controls = {'length': length,
                'sentiment': sentiment,
                'factuality': factuality,
                'language': language}

    # NOTE(review): uses module-level `args`, `shared_captioner`,
    # `shared_sam_model`, `iface` defined elsewhere in this file.
    model = build_caption_anything_with_models(
        args,
        api_key="",
        captioner=shared_captioner,
        sam_model=shared_sam_model,
        text_refiner=text_refiner,
        session_id=iface.app_id
    )

    # Reuse the precomputed SAM embedding instead of re-encoding the image.
    model.setup(image_embedding, original_size, input_size, is_image_set=True)

    enable_wiki = True if enable_wiki in ['True', 'TRUE', 'true', True, 'Yes', 'YES', 'yes'] else False
    out = model.inference(image_input, prompt, controls, disable_gpt=True, enable_wiki=enable_wiki)[0]

    state = state + [("Image point: {}, Input label: {}".format(prompt["input_point"], prompt["input_label"]), None)]
    state = state + [(None, "raw_caption: {}".format(out['generated_captions']['raw_caption']))]
    wiki = out['generated_captions'].get('wiki', "")

    update_click_state(click_state, out['generated_captions']['raw_caption'], click_mode)
    text = out['generated_captions']['raw_caption']
    input_mask = np.array(out['mask'].convert('P'))
    image_input = mask_painter(np.array(image_input), input_mask)
    origin_image_input = image_input
    image_input = create_bubble_frame(image_input, text, (click_index[0], click_index[1]), input_mask,
                                      input_points=input_points, input_labels=input_labels)
    # First yield: fast, GPT-free result.
    yield state, state, click_state, image_input, wiki
    if not args.disable_gpt and model.text_refiner:
        refined_caption = model.text_refiner.inference(query=text, controls=controls, context=out['context_captions'],
                                                       enable_wiki=enable_wiki)
        # new_cap = 'Original: ' + text + '. Refined: ' + refined_caption['caption']
        new_cap = refined_caption['caption']
        wiki = refined_caption['wiki']
        state = state + [(None, f"caption: {new_cap}")]
        refined_image_input = create_bubble_frame(origin_image_input, new_cap, (click_index[0], click_index[1]),
                                                  input_mask,
                                                  input_points=input_points, input_labels=input_labels)
        # Second yield: GPT-refined caption on the unannotated painted image.
        yield state, state, click_state, refined_image_input, wiki
def inference_traject(sketcher_image, enable_wiki, language, sentiment, factuality, length, image_embedding, state,
                      original_size, input_size, text_refiner):
    """Caption the region enclosed by a drawn trajectory. Generator shaped
    like `inference_click`: raw caption first, GPT-refined caption second
    (the latter only when GPT is enabled)."""
    image_input, mask = sketcher_image['image'], sketcher_image['mask']

    # Turn the sketch mask into box prompts for the segmenter.
    prompt = get_sketch_prompt(mask, multi_mask=True)
    boxes = prompt['input_boxes']

    controls = {'length': length,
                'sentiment': sentiment,
                'factuality': factuality,
                'language': language}

    # NOTE(review): uses module-level `args`, `shared_captioner`,
    # `shared_sam_model`, `iface` defined elsewhere in this file.
    model = build_caption_anything_with_models(
        args,
        api_key="",
        captioner=shared_captioner,
        sam_model=shared_sam_model,
        text_refiner=text_refiner,
        session_id=iface.app_id
    )

    model.setup(image_embedding, original_size, input_size, is_image_set=True)

    enable_wiki = True if enable_wiki in ['True', 'TRUE', 'true', True, 'Yes', 'YES', 'yes'] else False
    out = model.combined_inference(image_input, prompt, controls, disable_gpt=True, enable_wiki=enable_wiki,
                                   enable_morphologyex=True)

    # Update components and states
    state.append((f'Box: {boxes}', None))
    state.append((None, f'raw_caption: {out["generated_captions"]["raw_caption"]}'))
    wiki = out['generated_captions'].get('wiki', "")
    text = out['generated_captions']['raw_caption']

    input_mask = np.array(out['mask'].convert('P'))
    image_input = mask_painter(np.array(image_input), input_mask)
    origin_image_input = image_input

    # Anchor the caption bubble at the center of the first box.
    fake_click_index = (int((boxes[0][0] + boxes[0][2]) / 2), int((boxes[0][1] + boxes[0][3]) / 2))
    image_input = create_bubble_frame(image_input, text, fake_click_index, input_mask)

    yield state, state, image_input, wiki

    if not args.disable_gpt and model.text_refiner:
        refined_caption = model.text_refiner.inference(query=text, controls=controls, context=out['context_captions'],
                                                       enable_wiki=enable_wiki)
        new_cap = refined_caption['caption']
        wiki = refined_caption['wiki']
        state = state + [(None, f"caption: {new_cap}")]
        refined_image_input = create_bubble_frame(origin_image_input, new_cap, fake_click_index, input_mask)

        yield state, state, refined_image_input, wiki
def get_style():
    """Return version-appropriate CSS for the image widgets (gradio's DOM
    for image components changed across releases), or None when no custom
    CSS is needed."""
    current_version = version.parse(gr.__version__)
    if current_version <= version.parse('3.24.1'):
        return '''
#image_sketcher{min-height:500px}
#image_sketcher [data-testid="image"], #image_sketcher [data-testid="image"] > div{min-height: 500px}
#image_upload{min-height:500px}
#image_upload [data-testid="image"], #image_upload [data-testid="image"] > div{min-height: 500px}
'''
    if current_version <= version.parse('3.27'):
        return '''
#image_sketcher{min-height:500px}
#image_upload{min-height:500px}
'''
    return None
def create_ui():
    """Assemble and return the Caption-Anything gradio Blocks interface.

    Wires the image/sketch inputs and captioning controls to the inference
    callbacks defined above. GPT-dependent sections stay hidden until a
    valid OpenAI key is submitted.

    Fixes vs. the previous version:
    - the "Clear Text" reset produced a 4-element click_state where the
      invariant everywhere else is 3 lists (points, labels, captions);
    - the chat "Submit" button was defined but never wired to a callback.
    """
    title = """<p><h1 align="center">Caption-Anything</h1></p>
    """
    description = """<p>Gradio demo for Caption Anything, image to dense captioning generation with various language styles. To use it, simply upload your image, or click one of the examples to load them. Code: <a href="https://github.com/ttengwang/Caption-Anything">https://github.com/ttengwang/Caption-Anything</a> <a href="https://huggingface.co/spaces/TencentARC/Caption-Anything?duplicate=true"><img style="display: inline; margin-top: 0em; margin-bottom: 0em" src="https://bit.ly/3gLdBN6" alt="Duplicate Space" /></a></p>"""

    examples = [
        ["test_images/img35.webp"],
        ["test_images/img2.jpg"],
        ["test_images/img5.jpg"],
        ["test_images/img12.jpg"],
        ["test_images/img14.jpg"],
        ["test_images/qingming3.jpeg"],
        ["test_images/img1.jpg"],
    ]

    with gr.Blocks(
            css=get_style()
    ) as iface:
        # Cross-callback state holders.
        state = gr.State([])
        click_state = gr.State([[], [], []])
        chat_state = gr.State([])
        origin_image = gr.State(None)
        image_embedding = gr.State(None)
        text_refiner = gr.State(None)
        original_size = gr.State(None)
        input_size = gr.State(None)
        img_caption = gr.State(None)

        gr.Markdown(title)
        gr.Markdown(description)

        with gr.Row():
            with gr.Column(scale=1.0):
                with gr.Column(visible=False) as modules_not_need_gpt:
                    with gr.Tab("Click"):
                        image_input = gr.Image(type="pil", interactive=True, elem_id="image_upload").style(height=500)
                        example_image = gr.Image(type="pil", interactive=False, visible=False)
                        with gr.Row(scale=1.0):
                            with gr.Row(scale=0.4):
                                point_prompt = gr.Radio(
                                    choices=["Positive", "Negative"],
                                    value="Positive",
                                    label="Point Prompt",
                                    interactive=True)
                                click_mode = gr.Radio(
                                    choices=["Continuous", "Single"],
                                    value="Continuous",
                                    label="Clicking Mode",
                                    interactive=True)
                            with gr.Row(scale=0.4):
                                clear_button_click = gr.Button(value="Clear Clicks", interactive=True)
                                clear_button_image = gr.Button(value="Clear Image", interactive=True)
                    with gr.Tab("Trajectory (Beta)"):
                        sketcher_input = ImageSketcher(type="pil", interactive=True, brush_radius=20,
                                                       elem_id="image_sketcher").style(height=500)
                        with gr.Row():
                            clear_button_traj = gr.Button(value="Clear Trajectory", interactive=True)
                            clear_button_traj_img = gr.Button(value="Clear Image", interactive=True)
                            submit_button_sketcher = gr.Button(value="Submit", interactive=True)

                with gr.Column(visible=False) as modules_need_gpt:
                    with gr.Row(scale=1.0):
                        language = gr.Dropdown(
                            ['English', 'Chinese', 'French', "Spanish", "Arabic", "Portuguese", "Cantonese"],
                            value="English", label="Language", interactive=True)
                        sentiment = gr.Radio(
                            choices=["Positive", "Natural", "Negative"],
                            value="Natural",
                            label="Sentiment",
                            interactive=True,
                        )
                    with gr.Row(scale=1.0):
                        factuality = gr.Radio(
                            choices=["Factual", "Imagination"],
                            value="Factual",
                            label="Factuality",
                            interactive=True,
                        )
                        length = gr.Slider(
                            minimum=10,
                            maximum=80,
                            value=10,
                            step=1,
                            interactive=True,
                            label="Generated Caption Length",
                        )
                        enable_wiki = gr.Radio(
                            choices=["Yes", "No"],
                            value="No",
                            label="Enable Wiki",
                            interactive=True)
                with gr.Column(visible=True) as modules_not_need_gpt3:
                    gr.Examples(
                        examples=examples,
                        inputs=[example_image],
                    )
            with gr.Column(scale=0.5):
                openai_api_key = gr.Textbox(
                    placeholder="Input openAI API key",
                    show_label=False,
                    label="OpenAI API Key",
                    lines=1,
                    type="password")
                with gr.Row(scale=0.5):
                    enable_chatGPT_button = gr.Button(value="Run with ChatGPT", interactive=True, variant='primary')
                    disable_chatGPT_button = gr.Button(value="Run without ChatGPT (Faster)", interactive=True,
                                                       variant='primary')
                with gr.Column(visible=False) as modules_need_gpt2:
                    wiki_output = gr.Textbox(lines=5, label="Wiki", max_lines=5)
                with gr.Column(visible=False) as modules_not_need_gpt2:
                    chatbot = gr.Chatbot(label="Chat about Selected Object", ).style(height=550, scale=0.5)
                with gr.Column(visible=False) as modules_need_gpt3:
                    chat_input = gr.Textbox(show_label=False, placeholder="Enter text and press Enter").style(
                        container=False)
                    with gr.Row():
                        clear_button_text = gr.Button(value="Clear Text", interactive=True)
                        submit_button_text = gr.Button(value="Submit", interactive=True, variant="primary")

        # --- Event wiring -------------------------------------------------
        openai_api_key.submit(init_openai_api_key, inputs=[openai_api_key],
                              outputs=[modules_need_gpt, modules_need_gpt2, modules_need_gpt3, modules_not_need_gpt,
                                       modules_not_need_gpt2, modules_not_need_gpt3, text_refiner])
        enable_chatGPT_button.click(init_openai_api_key, inputs=[openai_api_key],
                                    outputs=[modules_need_gpt, modules_need_gpt2, modules_need_gpt3,
                                             modules_not_need_gpt,
                                             modules_not_need_gpt2, modules_not_need_gpt3, text_refiner])
        disable_chatGPT_button.click(init_openai_api_key,
                                     outputs=[modules_need_gpt, modules_need_gpt2, modules_need_gpt3,
                                              modules_not_need_gpt,
                                              modules_not_need_gpt2, modules_not_need_gpt3, text_refiner])

        enable_chatGPT_button.click(
            lambda: (None, [], [], [[], [], []], "", "", ""),
            [],
            [image_input, chatbot, state, click_state, wiki_output, origin_image],
            queue=False,
            show_progress=False
        )
        openai_api_key.submit(
            lambda: (None, [], [], [[], [], []], "", "", ""),
            [],
            [image_input, chatbot, state, click_state, wiki_output, origin_image],
            queue=False,
            show_progress=False
        )

        clear_button_click.click(
            lambda x: ([[], [], []], x, ""),
            [origin_image],
            [click_state, image_input, wiki_output],
            queue=False,
            show_progress=False
        )
        clear_button_image.click(
            lambda: (None, [], [], [], [[], [], []], "", "", ""),
            [],
            [image_input, chatbot, state, chat_state, click_state, wiki_output, origin_image, img_caption],
            queue=False,
            show_progress=False
        )
        # Fix: click_state is always [points, labels, captions] (3 lists);
        # the reset previously produced a 4-element list.
        clear_button_text.click(
            lambda: ([], [], [[], [], []], []),
            [],
            [chatbot, state, click_state, chat_state],
            queue=False,
            show_progress=False
        )
        image_input.clear(
            lambda: (None, [], [], [], [[], [], []], "", "", ""),
            [],
            [image_input, chatbot, state, chat_state, click_state, wiki_output, origin_image, img_caption],
            queue=False,
            show_progress=False
        )

        clear_button_traj.click(
            lambda x: (x, ""),
            [origin_image],
            [sketcher_input, wiki_output],
            queue=False,
            show_progress=False
        )
        clear_button_traj_img.click(
            lambda: (None, [], [], [], "", "", ""),
            [],
            [sketcher_input, chatbot, state, chat_state, wiki_output, origin_image, img_caption],
            queue=False,
            show_progress=False
        )

        image_input.upload(upload_callback, [image_input, state],
                           [chatbot, state, chat_state, origin_image, click_state, image_input, sketcher_input,
                            image_embedding, original_size, input_size, img_caption])
        sketcher_input.upload(upload_callback, [sketcher_input, state],
                              [chatbot, state, chat_state, origin_image, click_state, image_input, sketcher_input,
                               image_embedding, original_size, input_size, img_caption])
        chat_input.submit(chat_with_points, [chat_input, click_state, chat_state, state, text_refiner, img_caption],
                          [chatbot, state, chat_state])
        chat_input.submit(lambda: "", None, chat_input)
        # Fix: the Submit button now mirrors pressing Enter in the textbox
        # (it previously did nothing).
        submit_button_text.click(chat_with_points,
                                 [chat_input, click_state, chat_state, state, text_refiner, img_caption],
                                 [chatbot, state, chat_state])
        submit_button_text.click(lambda: "", None, chat_input)
        example_image.change(upload_callback, [example_image, state],
                             [chatbot, state, chat_state, origin_image, click_state, image_input, sketcher_input,
                              image_embedding, original_size, input_size, img_caption])

        # select coordinate
        image_input.select(
            inference_click,
            inputs=[
                origin_image, point_prompt, click_mode, enable_wiki, language, sentiment, factuality, length,
                image_embedding, state, click_state, original_size, input_size, text_refiner
            ],
            outputs=[chatbot, state, click_state, image_input, wiki_output],
            show_progress=False, queue=True
        )
        submit_button_sketcher.click(
            inference_traject,
            inputs=[
                sketcher_input, enable_wiki, language, sentiment, factuality, length, image_embedding, state,
                original_size, input_size, text_refiner
            ],
            outputs=[chatbot, state, sketcher_input, wiki_output],
            show_progress=False, queue=True
        )
    return iface
153,589 | import os
import json
import gradio as gr
import numpy as np
from gradio import processing_utils
from packaging import version
from PIL import Image, ImageDraw
import functools
from caption_anything.model import CaptionAnything
from caption_anything.utils.image_editing_utils import create_bubble_frame
from caption_anything.utils.utils import mask_painter, seg_model_map, prepare_segmenter, image_resize
from caption_anything.utils.parser import parse_augment
from caption_anything.captioner import build_captioner
from caption_anything.text_refiner import build_text_refiner
from caption_anything.segmenter import build_segmenter
from caption_anything.utils.chatbot import ConversationBot, build_chatbot_tools, get_new_image_name
from segment_anything import sam_model_registry
import easyocr
class ImageSketcher(gr.Image):
def __init__(self, **kwargs):
def preprocess(self, x):
def init_openai_api_key(api_key=""):
def init_wo_openai_api_key():
def chat_input_callback(*args):
def upload_callback(image_input, state, visual_chatgpt=None):
def inference_click(image_input, point_prompt, click_mode, enable_wiki, language, sentiment, factuality,
length, image_embedding, state, click_state, original_size, input_size, text_refiner, visual_chatgpt,
evt: gr.SelectData):
def inference_traject(sketcher_image, enable_wiki, language, sentiment, factuality, length, image_embedding, state,
original_size, input_size, text_refiner):
def clear_chat_memory(visual_chatgpt, keep_global=False):
def cap_everything(image_input, visual_chatgpt, text_refiner):
def get_style():
def create_ui():
    """Assemble and return the Caption-Anything gradio Blocks interface
    (visual-ChatGPT variant with caption-everything support).

    Fix vs. the previous version: the "Clear Text" reset produced a
    4-element click_state where the invariant everywhere else is 3 lists
    (points, labels, captions).
    """
    title = """<p><h1 align="center">Caption-Anything</h1></p>
    """
    description = """<p>Gradio demo for Caption Anything, image to dense captioning generation with various language styles. To use it, simply upload your image, or click one of the examples to load them. Code: <a href="https://github.com/ttengwang/Caption-Anything">https://github.com/ttengwang/Caption-Anything</a> <a href="https://huggingface.co/spaces/TencentARC/Caption-Anything?duplicate=true"><img style="display: inline; margin-top: 0em; margin-bottom: 0em" src="https://bit.ly/3gLdBN6" alt="Duplicate Space" /></a></p>"""

    examples = [
        ["test_images/img35.webp"],
        ["test_images/img2.jpg"],
        ["test_images/img5.jpg"],
        ["test_images/img12.jpg"],
        ["test_images/img14.jpg"],
        ["test_images/qingming3.jpeg"],
        ["test_images/img1.jpg"],
    ]

    with gr.Blocks(
            css=get_style()
    ) as iface:
        # Cross-callback state holders.
        state = gr.State([])
        click_state = gr.State([[], [], []])
        # chat_state = gr.State([])
        origin_image = gr.State(None)
        image_embedding = gr.State(None)
        text_refiner = gr.State(None)
        visual_chatgpt = gr.State(None)
        original_size = gr.State(None)
        input_size = gr.State(None)
        # img_caption = gr.State(None)
        aux_state = gr.State([])

        gr.Markdown(title)
        gr.Markdown(description)

        with gr.Row():
            with gr.Column(scale=1.0):
                with gr.Column(visible=False) as modules_not_need_gpt:
                    with gr.Tab("Click"):
                        image_input = gr.Image(type="pil", interactive=True, elem_id="image_upload")
                        example_image = gr.Image(type="pil", interactive=False, visible=False)
                        with gr.Row(scale=1.0):
                            with gr.Row(scale=0.4):
                                point_prompt = gr.Radio(
                                    choices=["Positive", "Negative"],
                                    value="Positive",
                                    label="Point Prompt",
                                    interactive=True)
                                click_mode = gr.Radio(
                                    choices=["Continuous", "Single"],
                                    value="Continuous",
                                    label="Clicking Mode",
                                    interactive=True)
                            with gr.Row(scale=0.4):
                                clear_button_click = gr.Button(value="Clear Clicks", interactive=True)
                                clear_button_image = gr.Button(value="Clear Image", interactive=True)
                    with gr.Tab("Trajectory (beta)"):
                        sketcher_input = ImageSketcher(type="pil", interactive=True, brush_radius=20,
                                                       elem_id="image_sketcher")
                        with gr.Row():
                            submit_button_sketcher = gr.Button(value="Submit", interactive=True)

                with gr.Column(visible=False) as modules_need_gpt1:
                    with gr.Row(scale=1.0):
                        language = gr.Dropdown(
                            ['English', 'Chinese', 'French', "Spanish", "Arabic", "Portuguese", "Cantonese"],
                            value="English", label="Language", interactive=True)
                        sentiment = gr.Radio(
                            choices=["Positive", "Natural", "Negative"],
                            value="Natural",
                            label="Sentiment",
                            interactive=True,
                        )
                    with gr.Row(scale=1.0):
                        factuality = gr.Radio(
                            choices=["Factual", "Imagination"],
                            value="Factual",
                            label="Factuality",
                            interactive=True,
                        )
                        length = gr.Slider(
                            minimum=10,
                            maximum=80,
                            value=10,
                            step=1,
                            interactive=True,
                            label="Generated Caption Length",
                        )
                        enable_wiki = gr.Radio(
                            choices=["Yes", "No"],
                            value="No",
                            label="Enable Wiki",
                            interactive=True)
                # with gr.Column(visible=True) as modules_not_need_gpt3:
                gr.Examples(
                    examples=examples,
                    inputs=[example_image],
                )
            with gr.Column(scale=0.5):
                with gr.Column(visible=True) as module_key_input:
                    openai_api_key = gr.Textbox(
                        placeholder="Input openAI API key",
                        show_label=False,
                        label="OpenAI API Key",
                        lines=1,
                        type="password")
                    with gr.Row(scale=0.5):
                        enable_chatGPT_button = gr.Button(value="Run with ChatGPT", interactive=True, variant='primary')
                        disable_chatGPT_button = gr.Button(value="Run without ChatGPT (Faster)", interactive=True,
                                                           variant='primary')
                with gr.Column(visible=False) as module_notification_box:
                    notification_box = gr.Textbox(lines=1, label="Notification", max_lines=5, show_label=False)
                with gr.Column(visible=False) as modules_need_gpt2:
                    paragraph_output = gr.Textbox(lines=7, label="Describe Everything", max_lines=7)
                with gr.Column(visible=False) as modules_need_gpt0:
                    cap_everything_button = gr.Button(value="Caption Everything in a Paragraph", interactive=True)
                with gr.Column(visible=False) as modules_not_need_gpt2:
                    chatbot = gr.Chatbot(label="Chatbox", ).style(height=550, scale=0.5)
                with gr.Column(visible=False) as modules_need_gpt3:
                    chat_input = gr.Textbox(show_label=False, placeholder="Enter text and press Enter").style(
                        container=False)
                    with gr.Row():
                        clear_button_text = gr.Button(value="Clear Text", interactive=True)
                        submit_button_text = gr.Button(value="Submit", interactive=True, variant="primary")

        # --- Event wiring -------------------------------------------------
        openai_api_key.submit(init_openai_api_key, inputs=[openai_api_key],
                              outputs=[modules_need_gpt0, modules_need_gpt1, modules_need_gpt2, modules_need_gpt3, modules_not_need_gpt,
                                       modules_not_need_gpt2, module_key_input, module_notification_box, text_refiner, visual_chatgpt, notification_box])
        enable_chatGPT_button.click(init_openai_api_key, inputs=[openai_api_key],
                                    outputs=[modules_need_gpt0, modules_need_gpt1, modules_need_gpt2, modules_need_gpt3,
                                             modules_not_need_gpt,
                                             modules_not_need_gpt2, module_key_input, module_notification_box, text_refiner, visual_chatgpt, notification_box])
        disable_chatGPT_button.click(init_wo_openai_api_key,
                                     outputs=[modules_need_gpt0, modules_need_gpt1, modules_need_gpt2, modules_need_gpt3,
                                              modules_not_need_gpt,
                                              modules_not_need_gpt2, module_key_input, module_notification_box, text_refiner, visual_chatgpt, notification_box])

        enable_chatGPT_button.click(
            lambda: (None, [], [], [[], [], []], "", "", ""),
            [],
            [image_input, chatbot, state, click_state, paragraph_output, origin_image],
            queue=False,
            show_progress=False
        )
        openai_api_key.submit(
            lambda: (None, [], [], [[], [], []], "", "", ""),
            [],
            [image_input, chatbot, state, click_state, paragraph_output, origin_image],
            queue=False,
            show_progress=False
        )

        cap_everything_button.click(cap_everything, [origin_image, visual_chatgpt, text_refiner], [paragraph_output])

        clear_button_click.click(
            lambda x: ([[], [], []], x),
            [origin_image],
            [click_state, image_input],
            queue=False,
            show_progress=False
        )
        clear_button_click.click(functools.partial(clear_chat_memory, keep_global=True), inputs=[visual_chatgpt])
        clear_button_image.click(
            lambda: (None, [], [], [[], [], []], "", "", ""),
            [],
            [image_input, chatbot, state, click_state, paragraph_output, origin_image],
            queue=False,
            show_progress=False
        )
        clear_button_image.click(clear_chat_memory, inputs=[visual_chatgpt])
        # Fix: click_state is always [points, labels, captions] (3 lists);
        # the reset previously produced a 4-element list.
        clear_button_text.click(
            lambda: ([], [], [[], [], []]),
            [],
            [chatbot, state, click_state],
            queue=False,
            show_progress=False
        )
        clear_button_text.click(clear_chat_memory, inputs=[visual_chatgpt])
        image_input.clear(
            lambda: (None, [], [], [[], [], []], "", "", ""),
            [],
            [image_input, chatbot, state, click_state, paragraph_output, origin_image],
            queue=False,
            show_progress=False
        )
        image_input.clear(clear_chat_memory, inputs=[visual_chatgpt])

        image_input.upload(upload_callback, [image_input, state, visual_chatgpt],
                           [chatbot, state, origin_image, click_state, image_input, sketcher_input,
                            image_embedding, original_size, input_size])
        sketcher_input.upload(upload_callback, [sketcher_input, state, visual_chatgpt],
                              [chatbot, state, origin_image, click_state, image_input, sketcher_input,
                               image_embedding, original_size, input_size])
        chat_input.submit(chat_input_callback, [visual_chatgpt, chat_input, click_state, state, aux_state],
                          [chatbot, state, aux_state])
        chat_input.submit(lambda: "", None, chat_input)
        submit_button_text.click(chat_input_callback, [visual_chatgpt, chat_input, click_state, state, aux_state],
                                 [chatbot, state, aux_state])
        submit_button_text.click(lambda: "", None, chat_input)
        example_image.change(upload_callback, [example_image, state, visual_chatgpt],
                             [chatbot, state, origin_image, click_state, image_input, sketcher_input,
                              image_embedding, original_size, input_size])
        example_image.change(clear_chat_memory, inputs=[visual_chatgpt])

        # select coordinate
        image_input.select(
            inference_click,
            inputs=[
                origin_image, point_prompt, click_mode, enable_wiki, language, sentiment, factuality, length,
                image_embedding, state, click_state, original_size, input_size, text_refiner, visual_chatgpt
            ],
            outputs=[chatbot, state, click_state, image_input],
            show_progress=False, queue=True
        )
        submit_button_sketcher.click(
            inference_traject,
            inputs=[
                sketcher_input, enable_wiki, language, sentiment, factuality, length, image_embedding, state,
                original_size, input_size, text_refiner
            ],
            outputs=[chatbot, state, sketcher_input],
            show_progress=False, queue=True
        )
    return iface
153,590 | import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import CrossEntropyLoss
from transformers.activations import ACT2FN
from transformers.file_utils import ModelOutput
from transformers.modeling_outputs import (
BaseModelOutput,
BaseModelOutputWithPast,
BaseModelOutputWithPooling,
CausalLMOutputWithPast,
)
from transformers.modeling_utils import PreTrainedModel
from transformers.pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
from transformers.utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
from transformers.models.git.configuration_git import GitConfig, GitVisionConfig
from .vit_pixel_masks_utils import ViTPatchMaskGenerator
The provided code snippet includes necessary dependencies for implementing the `_expand_mask` function. Write a Python function `def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None)` to solve the following problem:
Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
Here is the function:
def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):
"""
Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
"""
bsz, src_len = mask.size()
tgt_len = tgt_len if tgt_len is not None else src_len
expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype)
inverted_mask = 1.0 - expanded_mask
return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min) | Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`. |
153,591 | import torch
from PIL import Image, ImageDraw, ImageOps
from transformers import pipeline, BlipProcessor, BlipForConditionalGeneration, BlipForQuestionAnswering
import json
import pdb
import cv2
import numpy as np
from typing import Any, Union, List
import time
import clip
from caption_anything.utils.utils import load_image
def boundary(inputs):
    """Return (top_row, bottom_row) of the nonzero region of a 2-D 0/1 array.

    Finds the first and last maximal entries of the flattened array (i.e. the
    first and last set pixels of a binary mask) and converts the flat indices
    back to row indices.
    """
    n_cols = inputs.shape[1]
    flat = inputs.reshape(-1)
    first = np.argmax(flat)
    last = len(flat) - 1 - np.argmax(np.flip(flat))
    return first // n_cols, last // n_cols
def new_seg_to_box(seg_mask: Union[np.ndarray, Image.Image, str]):
    """Axis-aligned bounding box of a mask, normalized by the longer side.

    Accepts a file path, a PIL image, or a numpy array; returns
    [left, top, right, bottom] each divided by max(H, W).
    """
    # Normalize the input to a boolean numpy mask via PIL.
    if type(seg_mask) == str:
        seg_mask = Image.open(seg_mask)
    elif type(seg_mask) == np.ndarray:
        seg_mask = Image.fromarray(seg_mask)
    binary = np.array(seg_mask) > 0
    norm = max(binary.shape[0], binary.shape[1])
    top, bottom = boundary(binary)
    left, right = boundary(binary.T)
    return [left / norm, top / norm, right / norm, bottom / norm]
153,592 | import torch
from PIL import Image, ImageDraw, ImageOps
from transformers import pipeline, BlipProcessor, BlipForConditionalGeneration, BlipForQuestionAnswering
import json
import pdb
import cv2
import numpy as np
from typing import Any, Union, List
import time
import clip
from caption_anything.utils.utils import load_image
def seg_to_box(seg_mask: Union[np.ndarray, Image.Image, str]):
    """Convert a segmentation mask into the 4 corner points of its minimum-area
    rotated bounding box.

    Args:
        seg_mask: path to a mask image, or a 2-D numpy mask (bool or integer).

    Returns:
        np.ndarray of shape (4, 2), integer corners of the minimum-area
        rectangle, rotated so the first point is the leftmost corner for
        rectangles tilted >= 45 degrees and the topmost corner otherwise.
    """
    if type(seg_mask) == str:
        seg_mask = cv2.imread(seg_mask, cv2.IMREAD_GRAYSCALE)
        _, seg_mask = cv2.threshold(seg_mask, 127, 255, 0)
    elif type(seg_mask) == np.ndarray:
        assert seg_mask.ndim == 2  # only support single-channel segmentation mask
        # Fix: the bool check must precede the uint8 cast (after the cast the
        # dtype can never be bool, so the original branch was dead code).
        if seg_mask.dtype == bool:
            seg_mask = seg_mask * 255
        seg_mask = seg_mask.astype('uint8')
    contours, hierarchy = cv2.findContours(seg_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    contours = np.concatenate(contours, axis=0)
    rect = cv2.minAreaRect(contours)
    box = cv2.boxPoints(rect)
    if rect[-1] >= 45:
        newstart = box.argmin(axis=0)[1]  # leftmost
    else:
        newstart = box.argmax(axis=0)[0]  # topmost
    box = np.concatenate([box[newstart:], box[:newstart]], axis=0)
    # np.int0 was removed in NumPy 2.0; np.intp is the alias it stood for.
    box = box.astype(np.intp)
    return box
153,593 | import torch
from PIL import Image, ImageDraw, ImageOps
from transformers import pipeline, BlipProcessor, BlipForConditionalGeneration, BlipForQuestionAnswering
import json
import pdb
import cv2
import numpy as np
from typing import Any, Union, List
import time
import clip
from caption_anything.utils.utils import load_image
def get_w_h(rect_points):
    """Return (width, height) of a 4-point rectangle as integers.

    Width is the length of the p0->p1 edge, height the length of the p0->p3
    edge (Euclidean norm, truncated to int).
    """
    edge_w = rect_points[0] - rect_points[1]
    edge_h = rect_points[0] - rect_points[3]
    return (np.linalg.norm(edge_w, ord=2).astype('int'),
            np.linalg.norm(edge_h, ord=2).astype('int'))
def cut_box(img, rect_points):
    """Perspective-crop the rotated rectangle `rect_points` out of `img`.

    Returns the warped patch with shape (w, h) as measured by `get_w_h`.
    """
    w, h = get_w_h(rect_points)
    # Destination corners chosen to match the source corner ordering.
    dst = np.array([[h, 0], [h, w], [0, w], [0, 0], ], dtype="float32")
    warp = cv2.getPerspectiveTransform(rect_points.astype("float32"), dst)
    return cv2.warpPerspective(img, warp, (h, w))
153,594 | from dataclasses import dataclass
from typing import Any, Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn.functional import normalize
from transformers.activations import ACT2FN
from transformers.modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling
from transformers.modeling_utils import PreTrainedModel
from transformers.utils import (
ModelOutput,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from transformers.models.blip.configuration_blip import BlipConfig, BlipTextConfig, BlipVisionConfig
from transformers.models.blip.modeling_blip_text import BlipTextLMHeadModel, BlipTextModel
from .vit_pixel_masks_utils import ViTPatchMaskGenerator
def contrastive_loss(logits: torch.Tensor) -> torch.Tensor:
    """Cross-entropy against the diagonal: row i should score column i highest."""
    targets = torch.arange(len(logits), device=logits.device)
    return nn.functional.cross_entropy(logits, targets)

def blip_loss(similarity: torch.Tensor) -> torch.Tensor:
    """Symmetric contrastive loss over a (caption x image) similarity matrix.

    Averages the caption->image loss (rows) and the image->caption loss
    (columns, via the transpose).
    """
    caption_to_image = contrastive_loss(similarity)
    image_to_caption = contrastive_loss(similarity.t())
    return 0.5 * (caption_to_image + image_to_caption)
153,595 | import cv2
import json
import numpy as np
from typing import List
import random
from typing import Union
The provided code snippet includes necessary dependencies for implementing the `draw_bbox` function. Write a Python function `def draw_bbox(img: Union[np.ndarray, str], save_name: str, bbox: List[dict], show_caption: bool = False)` to solve the following problem:
bbox: [{'image_id': str, 'bbox': [x1, y1, x2, y2], 'caption': str}, ...]
Here is the function:
def draw_bbox(img: Union[np.ndarray, str], save_name: str, bbox: List[dict], show_caption: bool = False):
    """
    bbox: [{'image_id': str, 'bbox': [x1, y1, x2, y2], 'caption': str}, ...]

    Draws each box (random color from a small palette, optionally with its
    caption) onto `img` and writes the result to `save_name`. Box coordinates
    in the entries are coerced to int in place.
    """
    if isinstance(img, str):
        img = cv2.imread(img)
    palette = [0, 50, 100, 150, 200, 250]
    for entry in bbox:
        entry['bbox'] = [int(v) for v in entry['bbox']]
        x1, y1, x2, y2 = entry['bbox']
        caption = entry['caption']
        box_color = random.choices(palette, k = 3)
        (text_width, text_height), _ = cv2.getTextSize(caption, cv2.FONT_HERSHEY_SIMPLEX, fontScale = 0.5, thickness = 2)
        cv2.rectangle(img, (x1, y1), (x2, y2), color = box_color, thickness = 2)
        if show_caption:
            # Caption sits just inside the top edge of the box.
            cv2.putText(img, caption, (x1, y1 + text_height), cv2.FONT_HERSHEY_SIMPLEX, fontScale = 0.5, color = box_color, thickness = 2)
    cv2.imwrite(save_name, img)
153,596 | import cv2
import json
import numpy as np
from typing import List
import random
from typing import Union
def parse_bbox(anno, image_id: int = None):
    """Load the predicted boxes for one image from a JSON file keyed by image id.

    Falls back to the file's first image when `image_id` is None.
    """
    with open(anno, 'r') as fp:
        predictions = json.load(fp)
    key = image_id if image_id is not None else next(iter(predictions))
    return predictions[key]
153,597 | import cv2
import json
import numpy as np
from typing import List
import random
from typing import Union
def gt_bbox(anno, img_name: int = None):
    """Collect ground-truth boxes for one image from a COCO-style JSON file.

    Args:
        anno: path to a JSON file with an 'annotations' list, each entry
            holding 'image_id', 'bbox' ([x, y, w, h]) and 'caption'.
        img_name: image filename such as '63.jpg'; the numeric stem is the
            image id to match.

    Returns:
        List of {'bbox': [x1, y1, x2, y2], 'caption': str} for that image.
    """
    with open(anno, 'r') as f:
        annotations = json.load(f)
    annotations = annotations['annotations']
    gt = []
    # Strip the 4-char extension (e.g. '.jpg') to recover the numeric id.
    img_name = int(img_name[:-4])
    for annotation in annotations:
        # Fix: compare against the requested image id; the original compared
        # against the hard-coded literal 63 (debug leftover).
        if annotation['image_id'] == img_name:
            x1, y1, w, h = annotation['bbox']
            gt.append({'bbox': [x1, y1, x1 + w, y1 + h], 'caption': annotation['caption']})
    return gt
153,598 | import os
import time
import sys
import cv2
import hashlib
import requests
import numpy as np
from typing import Union
from PIL import Image
from tqdm import tqdm
The provided code snippet includes necessary dependencies for implementing the `load_image` function. Write a Python function `def load_image(image: Union[np.ndarray, Image.Image, str], return_type='numpy')` to solve the following problem:
Load image from path or PIL.Image or numpy.ndarray to required format.
Here is the function:
def load_image(image: Union[np.ndarray, Image.Image, str], return_type='numpy'):
    """
    Load image from path or PIL.Image or numpy.ndarray to required format.

    return_type: 'pil' or 'numpy'. RGBA inputs are converted to RGB.
    """
    # Fast path: the input already matches the requested representation.
    if return_type == 'pil' and isinstance(image, Image.Image):
        return image
    if return_type == 'numpy' and isinstance(image, np.ndarray):
        return image
    # Otherwise normalize everything to a PIL image first.
    if isinstance(image, str):
        image = Image.open(image)
    elif isinstance(image, np.ndarray):
        image = Image.fromarray(image)
    if image.mode == "RGBA":
        image = image.convert("RGB")
    if return_type == 'pil':
        return image
    if return_type == 'numpy':
        return np.asarray(image)
    raise NotImplementedError()
153,599 | import os
import time
import sys
import cv2
import hashlib
import requests
import numpy as np
from typing import Union
from PIL import Image
from tqdm import tqdm
def xywh_to_x1y1x2y2(bbox):
    """Convert a [x, y, w, h] box to [x1, y1, x2, y2] corner form."""
    left, top, width, height = bbox
    return left, top, left + width, top + height
153,600 | import os
import time
import sys
import cv2
import hashlib
import requests
import numpy as np
from typing import Union
from PIL import Image
from tqdm import tqdm
def x1y1x2y2_to_xywh(bbox):
    """Convert a [x1, y1, x2, y2] corner box to [x, y, w, h] form."""
    left, top, right, bottom = bbox
    return left, top, right - left, bottom - top
153,601 | import os
import time
import sys
import cv2
import hashlib
import requests
import numpy as np
from typing import Union
from PIL import Image
from tqdm import tqdm
def get_image_shape(image):
    """Return the size/shape of an image given as a path, ndarray, or PIL image.

    Note the asymmetry inherited from the underlying libraries: ndarrays
    yield `.shape` (rows, cols[, channels]) while paths and PIL images yield
    `.size` (width, height).
    """
    if isinstance(image, np.ndarray):
        return image.shape
    if isinstance(image, Image.Image):
        return image.size
    if isinstance(image, str):
        return Image.open(image).size
    raise NotImplementedError
153,602 | import os
import time
import sys
import cv2
import hashlib
import requests
import numpy as np
from typing import Union
from PIL import Image
from tqdm import tqdm
def is_platform_win():
    """Return True when running on native Windows (sys.platform 'win32')."""
    return "win32" == sys.platform
153,603 | import os
import time
import sys
import cv2
import hashlib
import requests
import numpy as np
from typing import Union
from PIL import Image
from tqdm import tqdm
color_list = colormap()
color_list = color_list.astype('uint8').tolist()
def colormap(rgb=True):
    """Return a fixed palette of 81 colors as a float32 array of shape (81, 3).

    Values are scaled to the 0-255 range. With rgb=True rows are (R, G, B);
    with rgb=False the channel order is reversed (BGR, e.g. for OpenCV).
    """
    # Flat list of normalized RGB triples; reshaped into rows of 3 below.
    color_list = np.array(
        [
            0.000, 0.000, 0.000,
            1.000, 1.000, 1.000,
            1.000, 0.498, 0.313,
            0.392, 0.581, 0.929,
            0.000, 0.447, 0.741,
            0.850, 0.325, 0.098,
            0.929, 0.694, 0.125,
            0.494, 0.184, 0.556,
            0.466, 0.674, 0.188,
            0.301, 0.745, 0.933,
            0.635, 0.078, 0.184,
            0.300, 0.300, 0.300,
            0.600, 0.600, 0.600,
            1.000, 0.000, 0.000,
            1.000, 0.500, 0.000,
            0.749, 0.749, 0.000,
            0.000, 1.000, 0.000,
            0.000, 0.000, 1.000,
            0.667, 0.000, 1.000,
            0.333, 0.333, 0.000,
            0.333, 0.667, 0.000,
            0.333, 1.000, 0.000,
            0.667, 0.333, 0.000,
            0.667, 0.667, 0.000,
            0.667, 1.000, 0.000,
            1.000, 0.333, 0.000,
            1.000, 0.667, 0.000,
            1.000, 1.000, 0.000,
            0.000, 0.333, 0.500,
            0.000, 0.667, 0.500,
            0.000, 1.000, 0.500,
            0.333, 0.000, 0.500,
            0.333, 0.333, 0.500,
            0.333, 0.667, 0.500,
            0.333, 1.000, 0.500,
            0.667, 0.000, 0.500,
            0.667, 0.333, 0.500,
            0.667, 0.667, 0.500,
            0.667, 1.000, 0.500,
            1.000, 0.000, 0.500,
            1.000, 0.333, 0.500,
            1.000, 0.667, 0.500,
            1.000, 1.000, 0.500,
            0.000, 0.333, 1.000,
            0.000, 0.667, 1.000,
            0.000, 1.000, 1.000,
            0.333, 0.000, 1.000,
            0.333, 0.333, 1.000,
            0.333, 0.667, 1.000,
            0.333, 1.000, 1.000,
            0.667, 0.000, 1.000,
            0.667, 0.333, 1.000,
            0.667, 0.667, 1.000,
            0.667, 1.000, 1.000,
            1.000, 0.000, 1.000,
            1.000, 0.333, 1.000,
            1.000, 0.667, 1.000,
            0.167, 0.000, 0.000,
            0.333, 0.000, 0.000,
            0.500, 0.000, 0.000,
            0.667, 0.000, 0.000,
            0.833, 0.000, 0.000,
            1.000, 0.000, 0.000,
            0.000, 0.167, 0.000,
            0.000, 0.333, 0.000,
            0.000, 0.500, 0.000,
            0.000, 0.667, 0.000,
            0.000, 0.833, 0.000,
            0.000, 1.000, 0.000,
            0.000, 0.000, 0.167,
            0.000, 0.000, 0.333,
            0.000, 0.000, 0.500,
            0.000, 0.000, 0.667,
            0.000, 0.000, 0.833,
            0.000, 0.000, 1.000,
            0.143, 0.143, 0.143,
            0.286, 0.286, 0.286,
            0.429, 0.429, 0.429,
            0.571, 0.571, 0.571,
            0.714, 0.714, 0.714,
            0.857, 0.857, 0.857
        ]
    ).astype(np.float32)
    # Group into (N, 3) rows and scale from [0, 1] to [0, 255].
    color_list = color_list.reshape((-1, 3)) * 255
    if not rgb:
        # Reverse channel order (RGB -> BGR).
        color_list = color_list[:, ::-1]
    return color_list
153,604 | import os
import time
import sys
import cv2
import hashlib
import requests
import numpy as np
from typing import Union
from PIL import Image
from tqdm import tqdm
def vis_add_mask_wo_blur(image, mask, color, alpha):
    """Blend `color` into `image` guided by `mask`, without any blur.

    Per channel: out = img * (1 - alpha + mask) + color * (alpha - mask),
    so pixels where mask == alpha are left untouched and pixels where
    mask == 0 are blended toward `color` with weight `alpha`. Mutates
    `image` in place and returns it.
    """
    rgb = np.array(color)
    weights = mask.astype('float').copy()
    keep_w = 1 - alpha + weights
    paint_w = alpha - weights
    for ch in range(3):
        image[:, :, ch] = image[:, :, ch] * keep_w + rgb[ch] * paint_w
    return image
153,605 | import os
import time
import sys
import cv2
import hashlib
import requests
import numpy as np
from typing import Union
from PIL import Image
from tqdm import tqdm
def mask_painter(input_image, input_mask, background_alpha=0.7, background_blur_radius=7, contour_width=3,
                 contour_color=3, contour_alpha=1, background_color=0, paint_foreground=False):
    """
    add color mask to the background/foreground area
    input_image: numpy array (w, h, C)
    input_mask: numpy array (w, h)
    background_alpha: transparency of background, [0, 1], 1: all black, 0: do nothing
    background_blur_radius: radius of background blur, must be odd number
    contour_width: width of mask contour, must be odd number
    contour_color: color index (in color map) of mask contour, 0: black, 1: white, >1: others
    background_color: color index of the background (area with input_mask == False)
    contour_alpha: transparency of mask contour, [0, 1], if 0: no contour highlighted
    paint_foreground: True for paint on foreground, False for background. Default: False
    Output:
        painted_image: numpy array
    """
    assert input_image.shape[:2] == input_mask.shape, 'different shape'
    # Product of the two parities is > 0 only when both values are odd.
    assert background_blur_radius % 2 * contour_width % 2 > 0, 'background_blur_radius and contour_width must be ODD'
    # 0: background, 1: foreground
    # NOTE(review): binarizes the caller's mask in place (values become {0, 255}).
    input_mask[input_mask > 0] = 255
    if paint_foreground:
        # Invert the mask so the painted/blurred region is the foreground.
        # `vis_add_mask` is defined elsewhere in this module.
        painted_image = vis_add_mask(input_image, 255 - input_mask, color_list[background_color], background_alpha,
                                     background_blur_radius)  # black for background
    else:
        # mask background
        painted_image = vis_add_mask(input_image, input_mask, color_list[background_color], background_alpha,
                                     background_blur_radius)  # black for background
    # mask contour
    contour_mask = input_mask.copy()
    contour_mask = cv2.Canny(contour_mask, 100, 200)  # contour extraction
    # widen contour
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (contour_width, contour_width))
    contour_mask = cv2.dilate(contour_mask, kernel)
    painted_image = vis_add_mask(painted_image, 255 - contour_mask, color_list[contour_color], contour_alpha,
                                 contour_width)
    return painted_image
The provided code snippet includes necessary dependencies for implementing the `mask_painter_foreground_all` function. Write a Python function `def mask_painter_foreground_all(input_image, input_masks, background_alpha=0.7, background_blur_radius=7, contour_width=3, contour_color=3, contour_alpha=1)` to solve the following problem:
paint color mask on the all foreground area input_image: numpy array with shape (w, h, C) input_mask: list of masks, each mask is a numpy array with shape (w,h) background_alpha: transparency of background, [0, 1], 1: all black, 0: do nothing background_blur_radius: radius of background blur, must be odd number contour_width: width of mask contour, must be odd number contour_color: color index (in color map) of mask contour, 0: black, 1: white, >1: others background_color: color index of the background (area with input_mask == False) contour_alpha: transparency of mask contour, [0, 1], if 0: no contour highlighted Output: painted_image: numpy array
Here is the function:
def mask_painter_foreground_all(input_image, input_masks, background_alpha=0.7, background_blur_radius=7,
                                contour_width=3, contour_color=3, contour_alpha=1):
    """Paint every mask in `input_masks` onto the image as a colored foreground.

    Each mask receives its own palette color (color-map indices 2, 3, ...)
    by delegating to `mask_painter` with paint_foreground=True.

    input_image: numpy array (w, h, C)
    input_masks: list of numpy arrays, each of shape (w, h)
    The remaining parameters are forwarded to `mask_painter` unchanged.
    Returns the painted image as a numpy array.
    """
    painted = input_image
    for idx, msk in enumerate(input_masks):
        painted = mask_painter(painted, msk, background_alpha, background_blur_radius, contour_width,
                               contour_color, contour_alpha, background_color=idx + 2,
                               paint_foreground=True)
    return painted
153,606 | import os
import time
import sys
import cv2
import hashlib
import requests
import numpy as np
from typing import Union
from PIL import Image
from tqdm import tqdm
color_list = colormap()
color_list = color_list.astype('uint8').tolist()
def vis_add_mask_wo_gaussian(image, background_mask, contour_mask, background_color, contour_color, background_alpha,
                             contour_alpha):
    """Blend background and contour colors into `image` (no Gaussian blur).

    Where a mask is 1 the pixel is kept; where it is 0 the corresponding
    color is mixed in with the given alpha. Mutates `image` in place and
    returns it cast to uint8.
    """
    bg_rgb = np.array(background_color)
    ct_rgb = np.array(contour_color)
    bg_keep = 1 - background_alpha + background_mask * background_alpha
    bg_add = background_alpha - background_mask * background_alpha
    ct_keep = 1 - contour_alpha + contour_mask * contour_alpha
    ct_add = contour_alpha - contour_mask * contour_alpha
    for ch in range(3):
        image[:, :, ch] = image[:, :, ch] * bg_keep + bg_rgb[ch] * bg_add
        image[:, :, ch] = image[:, :, ch] * ct_keep + ct_rgb[ch] * ct_add
    return image.astype('uint8')
def mask_generator_00(mask, background_radius, contour_radius):
    """Mode '00': background untouched; binary contour band around the edge."""
    # Signed distance to the mask boundary: positive inside, negative outside.
    dist_inside = cv2.distanceTransform(mask, cv2.DIST_L2, 3)
    dist_outside = cv2.distanceTransform(1 - mask, cv2.DIST_L2, 3)
    signed_dist = dist_inside - dist_outside
    # ...:::!!!:::...
    contour_radius += 2
    band = np.abs(np.clip(signed_dist, -contour_radius, contour_radius))
    band = band / np.max(band)
    # Binarize: anything past the half-way point counts as fully "not contour".
    band[band > 0.5] = 1.
    return mask, band
def mask_generator_01(mask, background_radius, contour_radius):
    """Mode '01': background untouched; soft (graded) contour band."""
    dist_inside = cv2.distanceTransform(mask, cv2.DIST_L2, 3)
    dist_outside = cv2.distanceTransform(1 - mask, cv2.DIST_L2, 3)
    signed_dist = dist_inside - dist_outside
    # ...:::!!!:::...  (graded band, no binarization unlike mode '00')
    contour_radius += 2
    band = np.abs(np.clip(signed_dist, -contour_radius, contour_radius))
    band = band / np.max(band)
    return mask, band
def mask_generator_10(mask, background_radius, contour_radius):
    """Mode '10': smooth background ramp; binary contour band."""
    dist_inside = cv2.distanceTransform(mask, cv2.DIST_L2, 3)
    dist_outside = cv2.distanceTransform(1 - mask, cv2.DIST_L2, 3)
    signed_dist = dist_inside - dist_outside
    # .....:::::!!!!!  (background fades in over `background_radius` pixels)
    bg_ramp = np.clip(signed_dist, -background_radius, background_radius)
    bg_ramp = (bg_ramp - np.min(bg_ramp))
    bg_ramp = bg_ramp / np.max(bg_ramp)
    # ...:::!!!:::...
    contour_radius += 2
    band = np.abs(np.clip(signed_dist, -contour_radius, contour_radius))
    band = band / np.max(band)
    band[band > 0.5] = 1.
    return bg_ramp, band
def mask_generator_11(mask, background_radius, contour_radius):
    """Mode '11': smooth background ramp; soft (graded) contour band."""
    dist_inside = cv2.distanceTransform(mask, cv2.DIST_L2, 3)
    dist_outside = cv2.distanceTransform(1 - mask, cv2.DIST_L2, 3)
    signed_dist = dist_inside - dist_outside
    # .....:::::!!!!!
    bg_ramp = np.clip(signed_dist, -background_radius, background_radius)
    bg_ramp = (bg_ramp - np.min(bg_ramp))
    bg_ramp = bg_ramp / np.max(bg_ramp)
    # ...:::!!!:::...  (graded band, no binarization unlike mode '10')
    contour_radius += 2
    band = np.abs(np.clip(signed_dist, -contour_radius, contour_radius))
    band = band / np.max(band)
    return bg_ramp, band
The provided code snippet includes necessary dependencies for implementing the `mask_painter_wo_gaussian` function. Write a Python function `def mask_painter_wo_gaussian(input_image, input_mask, background_alpha=0.5, background_blur_radius=7, contour_width=3, contour_color=3, contour_alpha=1, mode='11')` to solve the following problem:
Input: input_image: numpy array input_mask: numpy array background_alpha: transparency of background, [0, 1], 1: all black, 0: do nothing background_blur_radius: radius of background blur, must be odd number contour_width: width of mask contour, must be odd number contour_color: color index (in color map) of mask contour, 0: black, 1: white, >1: others contour_alpha: transparency of mask contour, [0, 1], if 0: no contour highlighted mode: painting mode, '00', no blur, '01' only blur contour, '10' only blur background, '11' blur both Output: painted_image: numpy array
Here is the function:
def mask_painter_wo_gaussian(input_image, input_mask, background_alpha=0.5, background_blur_radius=7, contour_width=3,
                             contour_color=3, contour_alpha=1, mode='11'):
    """
    Input:
        input_image: numpy array
        input_mask: numpy array
        background_alpha: transparency of background, [0, 1], 1: all black, 0: do nothing
        background_blur_radius: radius of background blur, must be odd number
        contour_width: width of mask contour, must be odd number
        contour_color: color index (in color map) of mask contour, 0: black, 1: white, >1: others
        contour_alpha: transparency of mask contour, [0, 1], if 0: no contour highlighted
        mode: painting mode, '00', no blur, '01' only blur contour, '10' only blur background, '11' blur both
    Output:
        painted_image: numpy array
    """
    assert input_image.shape[:2] == input_mask.shape, 'different shape'
    # Product of parities is > 0 only when both values are odd.
    assert background_blur_radius % 2 * contour_width % 2 > 0, 'background_blur_radius and contour_width must be ODD'
    assert mode in ['00', '01', '10', '11'], 'mode should be 00, 01, 10, or 11'
    # downsample input image and mask
    # NOTE: `width` is actually the row count (shape[0]) and `height` the column
    # count; cv2.resize expects (cols, rows), which is what is passed below.
    width, height = input_image.shape[0], input_image.shape[1]
    res = 1024
    ratio = min(1.0 * res / max(width, height), 1.0)  # cap the long side at 1024, never upscale
    input_image = cv2.resize(input_image, (int(height * ratio), int(width * ratio)))
    input_mask = cv2.resize(input_mask, (int(height * ratio), int(width * ratio)))
    # 0: background, 1: foreground
    msk = np.clip(input_mask, 0, 1)
    # generate masks for background and contour pixels
    background_radius = (background_blur_radius - 1) // 2
    contour_radius = (contour_width - 1) // 2
    # Dispatch to one of the sibling mask_generator_* helpers by mode string.
    generator_dict = {'00': mask_generator_00, '01': mask_generator_01, '10': mask_generator_10,
                      '11': mask_generator_11}
    background_mask, contour_mask = generator_dict[mode](msk, background_radius, contour_radius)
    # paint
    painted_image = vis_add_mask_wo_gaussian \
        (input_image, background_mask, contour_mask, color_list[0], color_list[contour_color], background_alpha,
         contour_alpha)  # black for background
    return painted_image
153,607 | import os
import time
import sys
import cv2
import hashlib
import requests
import numpy as np
from typing import Union
from PIL import Image
from tqdm import tqdm
seg_model_map = {
'base': 'vit_b',
'large': 'vit_l',
'huge': 'vit_h'
}
ckpt_url_map = {
'vit_b': 'https://dl.fbaipublicfiles.com/segment_anything/sam_vit_b_01ec64.pth',
'vit_l': 'https://dl.fbaipublicfiles.com/segment_anything/sam_vit_l_0b3195.pth',
'vit_h': 'https://dl.fbaipublicfiles.com/segment_anything/sam_vit_h_4b8939.pth'
}
expected_sha256_map = {
'vit_b': 'ec2df62732614e57411cdcf32a23ffdf28910380d03139ee0f4fcbe91eb8c912',
'vit_l': '3adcc4315b642a4d2101128f611684e8734c41232a17c648ed1693702a49a622',
'vit_h': 'a7bf3b02f3ebf1267aba913ff637d9a2d5c33d3173bb679e46d9f338c26f262e'
}
def download_checkpoint(url, folder, filename, expected_sha256):
    """Download a checkpoint to `folder/filename`, verifying its SHA-256.

    If the file already exists with the expected checksum, no download is
    performed. Returns the local path.

    Raises:
        RuntimeError: if the downloaded file's checksum does not match.
    """
    def _sha256(path):
        # Hash in chunks so multi-GB checkpoints don't have to fit in memory,
        # and close the file handle deterministically (the original leaked it).
        digest = hashlib.sha256()
        with open(path, "rb") as fh:
            for chunk in iter(lambda: fh.read(1 << 20), b""):
                digest.update(chunk)
        return digest.hexdigest()

    os.makedirs(folder, exist_ok=True)
    download_target = os.path.join(folder, filename)
    if os.path.isfile(download_target) and _sha256(download_target) == expected_sha256:
        return download_target
    print(f'Download SAM checkpoint {url}, saving to {download_target} ...')
    with requests.get(url, stream=True) as response, open(download_target, "wb") as output:
        progress = tqdm(total=int(response.headers.get('content-length', 0)), unit='B', unit_scale=True)
        for data in response.iter_content(chunk_size=1024):
            progress.update(output.write(data))
    if _sha256(download_target) != expected_sha256:
        # Message fixed: was "does not not match".
        raise RuntimeError("Model has been downloaded but the SHA256 checksum does not match")
    return download_target
The provided code snippet includes necessary dependencies for implementing the `prepare_segmenter` function. Write a Python function `def prepare_segmenter(segmenter="huge", download_root: str = None)` to solve the following problem:
Prepare the segmenter model and download its checkpoint if necessary. Returns: a tuple of the segmenter model name (one of 'vit_b', 'vit_l', 'vit_h') and the local checkpoint path.
Here is the function:
def prepare_segmenter(segmenter="huge", download_root: str = None):
    """Resolve the SAM variant name and fetch its checkpoint if necessary.

    Args:
        segmenter: one of 'base', 'large', 'huge' (keys of seg_model_map).
        download_root: cache directory; defaults to ~/.cache/SAM.

    Returns:
        Tuple of (model name — one of 'vit_b', 'vit_l', 'vit_h' — and the
        local checkpoint path).
    """
    os.makedirs('result', exist_ok=True)
    model_name = seg_model_map[segmenter]
    url = ckpt_url_map[model_name]
    cache_dir = download_root or os.path.expanduser("~/.cache/SAM")
    ckpt_path = download_checkpoint(url, cache_dir, os.path.basename(url),
                                    expected_sha256_map[model_name])
    return model_name, ckpt_path
153,608 | import argparse
def parse_augment():
    """Parse command-line arguments for the caption-anything application.

    Returns the populated argparse.Namespace; prints it when --debug is set.
    """
    parser = argparse.ArgumentParser()
    # Model selection.
    parser.add_argument('--captioner', type=str, default="blip2")
    parser.add_argument('--segmenter', type=str, default="huge")
    parser.add_argument('--text_refiner', type=str, default="base")
    parser.add_argument('--segmenter_checkpoint', type=str, default=None, help="SAM checkpoint path")
    # Captioning behavior.
    parser.add_argument('--seg_crop_mode', type=str, default="wo_bg", choices=['wo_bg', 'w_bg'],
                        help="whether to add or remove background of the image when captioning")
    parser.add_argument('--clip_filter', action="store_true", help="use clip to filter bad captions")
    parser.add_argument('--context_captions', action="store_true",
                        help="use surrounding captions to enhance current caption (TODO)")
    parser.add_argument('--disable_regular_box', action="store_true", default=False,
                        help="crop image with a regular box")
    # Runtime / application options.
    parser.add_argument('--device', type=str, default="cuda:0")
    parser.add_argument('--port', type=int, default=6086, help="only useful when running gradio applications")
    parser.add_argument('--debug', action="store_true")
    parser.add_argument('--gradio_share', action="store_true")
    parser.add_argument('--disable_gpt', action="store_true")
    parser.add_argument('--enable_reduce_tokens', action="store_true", default=False)
    parser.add_argument('--disable_reuse_features', action="store_true", default=False)
    parser.add_argument('--enable_morphologyex', action="store_true", default=False)
    parser.add_argument('--chat_tools_dict', type=str, default='VisualQuestionAnswering_cuda:0', help='Visual ChatGPT tools, only useful when running gradio applications')
    # SAM post-processing thresholds.
    parser.add_argument('--pred_iou_thresh', type=float, default=0.88, help="sam post-precessing")
    parser.add_argument('--min_mask_region_area', type=int, default=0, help="sam post-precessing")
    parser.add_argument('--stability_score_thresh', type=float, default=0.95, help='sam post-processing')
    parser.add_argument('--box_nms_thresh', type=float, default=0.7, help='sam post-processing')
    args = parser.parse_args()
    if args.debug:
        print(args)
    return args
153,609 | import os
import gradio as gr
import re
import uuid
from PIL import Image, ImageDraw, ImageOps
import numpy as np
import argparse
import inspect
from langchain.agents.initialize import initialize_agent
from langchain.agents.tools import Tool
from langchain.chains.conversation.memory import ConversationBufferMemory
from langchain.llms.openai import OpenAI
import torch
from PIL import Image, ImageDraw, ImageOps
from transformers import pipeline, BlipProcessor, BlipForConditionalGeneration, BlipForQuestionAnswering
def prompts(name, description):
    """Decorator factory that tags a tool function with `name`/`description`
    attributes (read later when registering langchain tools)."""
    def _attach(fn):
        fn.name, fn.description = name, description
        return fn
    return _attach
153,610 | import os
import gradio as gr
import re
import uuid
from PIL import Image, ImageDraw, ImageOps
import numpy as np
import argparse
import inspect
from langchain.agents.initialize import initialize_agent
from langchain.agents.tools import Tool
from langchain.chains.conversation.memory import ConversationBufferMemory
from langchain.llms.openai import OpenAI
import torch
from PIL import Image, ImageDraw, ImageOps
from transformers import pipeline, BlipProcessor, BlipForConditionalGeneration, BlipForQuestionAnswering
def cut_dialogue_history(history_memory, keep_last_n_words=500):
    """Trim chat history to roughly the last `keep_last_n_words` words.

    Drops whole leading paragraphs (newline-separated) until the remaining
    word count falls below the threshold. Empty or None histories, and
    histories already under the limit, are returned unchanged.
    """
    if history_memory is None or len(history_memory) == 0:
        return history_memory
    tokens = history_memory.split()
    n_tokens = len(tokens)
    print(f"history_memory:{history_memory}, n_tokens: {n_tokens}")
    if n_tokens < keep_last_n_words:
        return history_memory
    paragraphs = history_memory.split('\n')
    last_n_tokens = n_tokens
    # Guard on `paragraphs`: split(' ') counts empty strings as tokens, so the
    # running total can still be >= the threshold (e.g. keep_last_n_words=0)
    # after every paragraph is dropped; without the guard paragraphs[0] would
    # raise IndexError.
    while paragraphs and last_n_tokens >= keep_last_n_words:
        last_n_tokens -= len(paragraphs[0].split(' '))
        paragraphs = paragraphs[1:]
    return '\n' + '\n'.join(paragraphs)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.