id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
180,912 | import torch
import torch.nn as nn
from torch.nn import init
import functools
import numpy as np
from . import resnet
def get_norm_layer(norm_type='instance'):
    """Return a constructor for the requested 2-D normalization layer.

    Args:
        norm_type (str): 'batch' for BatchNorm2d with learnable affine
            parameters, or 'instance' for InstanceNorm2d without them.

    Returns:
        functools.partial: callable building the layer from num_features.

    Raises:
        NotImplementedError: if `norm_type` is not recognized.
    """
    if norm_type == 'batch':
        return functools.partial(nn.BatchNorm2d, affine=True)
    if norm_type == 'instance':
        return functools.partial(nn.InstanceNorm2d, affine=False)
    raise NotImplementedError('normalization layer [%s] is not found' % norm_type)
180,913 | import torch
import torch.nn as nn
from torch.nn import init
import functools
import numpy as np
from . import resnet
def print_network(net):
    """Print a network's structure followed by its total parameter count.

    Args:
        net (nn.Module): network whose parameters are tallied.
    """
    total = sum(p.numel() for p in net.parameters())
    print(net)
    print('Total number of parameters: %d' % total)
180,914 | import torch
import torch.nn as nn
from torch.nn import init
import functools
import numpy as np
from . import resnet
def get_model(arch):
    """Look up a backbone constructor by name in the local `resnet` module.

    Args:
        arch (str): attribute name of a constructor in `resnet`
            (e.g. 'resnet18').

    Returns:
        The backbone built with pretrained=True and num_classes=512.

    Raises:
        ValueError: if `resnet` has no attribute named `arch`.
    """
    if not hasattr(resnet, arch):
        raise ValueError("Invalid Backbone Architecture")
    builder = getattr(resnet, arch)
    return builder(pretrained=True, num_classes=512)
180,915 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch.nn as nn
import math
import torch.utils.model_zoo as model_zoo
The provided code snippet includes necessary dependencies for implementing the `conv3x3` function. Write a Python function `def conv3x3(in_planes, out_planes, stride=1)` to solve the following problem:
3x3 convolution with padding
Here is the function:
def conv3x3(in_planes, out_planes, stride=1):
    """3x3 convolution with padding.

    Args:
        in_planes (int): number of input channels.
        out_planes (int): number of output channels.
        stride (int): convolution stride.

    Returns:
        nn.Conv2d: bias-free 3x3 convolution with padding 1, so spatial
        size is preserved when stride is 1.
    """
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=False,
    )
180,916 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch.nn as nn
import math
import torch.utils.model_zoo as model_zoo
# URLs of the official torchvision ImageNet checkpoints, keyed by
# architecture name; fetched via model_zoo.load_url when pretrained=True.
model_urls = {
    'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
    'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
    'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
    'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
    'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
class BasicBlock(nn.Module):
    """Two 3x3-conv residual block used by ResNet-18/34 (expansion 1)."""

    expansion = 1  # output channels == planes * expansion

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        """Build the block.

        Args:
            inplanes (int): input channel count.
            planes (int): output channel count of both convolutions.
            stride (int): stride of the first convolution.
            downsample (nn.Module or None): projection applied to the
                identity branch when shape/channels change.
        """
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        """conv-bn-relu-conv-bn, add the (possibly projected) identity, relu."""
        identity = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        out += identity
        return self.relu(out)
class ResNet(nn.Module):
    """ResNet trunk ending in a 1024-dim feature head instead of a classifier.

    NOTE(review): `num_classes` is accepted but never used — the head is a
    fixed Linear(512 * expansion, 1024); confirm this is intentional.
    """

    def __init__(self, block, layers, num_classes=1000):
        """Assemble stem, four residual stages, average pool and fc head.

        Args:
            block (type): residual block class exposing an `expansion` attribute.
            layers (sequence of int): block counts for the four stages.
            num_classes (int): unused; kept for interface compatibility.
        """
        self.inplanes = 64  # running channel count consumed by _make_layer
        super(ResNet, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        # assumes a 7x7 spatial map here, i.e. 224x224 input — TODO confirm
        self.avgpool = nn.AvgPool2d(7, stride=1)
        self.fc1 = nn.Linear(512 * block.expansion, 1024)
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out',
                                        nonlinearity='relu')
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

    def _make_layer(self, block, planes, blocks, stride=1):
        """Stack `blocks` residual blocks, projecting the identity when needed."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )
        stages = [block(self.inplanes, planes, stride, downsample)]
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            stages.append(block(self.inplanes, planes))
        return nn.Sequential(*stages)

    def forward(self, x):
        """Return a 1024-dim non-negative feature vector per input image.

        NOTE(review): relu is applied both before and after fc1, mirroring
        the original implementation.
        """
        x = self.maxpool(self.relu(self.bn1(self.conv1(x))))
        x = self.layer4(self.layer3(self.layer2(self.layer1(x))))
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.relu(x)
        x = self.fc1(x)
        return self.relu(x)
def resnet18(pretrained=False, **kwargs):
    """Construct a ResNet-18 feature extractor.

    Args:
        pretrained (bool): when True, load ImageNet weights non-strictly
            (the custom fc head keeps its random initialization).
        **kwargs: forwarded to the ResNet constructor.

    Returns:
        ResNet: the assembled model.
    """
    model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
    if pretrained:
        state = model_zoo.load_url(model_urls['resnet18'])
        model.load_state_dict(state, strict=False)
    return model
180,917 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch.nn as nn
import math
import torch.utils.model_zoo as model_zoo
# URLs of the official torchvision ImageNet checkpoints, keyed by
# architecture name; fetched via model_zoo.load_url when pretrained=True.
model_urls = {
    'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
    'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
    'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
    'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
    'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
class BasicBlock(nn.Module):
    """Two 3x3-conv residual block used by ResNet-18/34 (expansion 1)."""

    expansion = 1  # output channels == planes * expansion

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        """Build the block.

        Args:
            inplanes (int): input channel count.
            planes (int): output channel count of both convolutions.
            stride (int): stride of the first convolution.
            downsample (nn.Module or None): projection applied to the
                identity branch when shape/channels change.
        """
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        """conv-bn-relu-conv-bn, add the (possibly projected) identity, relu."""
        identity = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        out += identity
        return self.relu(out)
class ResNet(nn.Module):
    """ResNet trunk ending in a 1024-dim feature head instead of a classifier.

    NOTE(review): `num_classes` is accepted but never used — the head is a
    fixed Linear(512 * expansion, 1024); confirm this is intentional.
    """

    def __init__(self, block, layers, num_classes=1000):
        """Assemble stem, four residual stages, average pool and fc head.

        Args:
            block (type): residual block class exposing an `expansion` attribute.
            layers (sequence of int): block counts for the four stages.
            num_classes (int): unused; kept for interface compatibility.
        """
        self.inplanes = 64  # running channel count consumed by _make_layer
        super(ResNet, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        # assumes a 7x7 spatial map here, i.e. 224x224 input — TODO confirm
        self.avgpool = nn.AvgPool2d(7, stride=1)
        self.fc1 = nn.Linear(512 * block.expansion, 1024)
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out',
                                        nonlinearity='relu')
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

    def _make_layer(self, block, planes, blocks, stride=1):
        """Stack `blocks` residual blocks, projecting the identity when needed."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )
        stages = [block(self.inplanes, planes, stride, downsample)]
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            stages.append(block(self.inplanes, planes))
        return nn.Sequential(*stages)

    def forward(self, x):
        """Return a 1024-dim non-negative feature vector per input image.

        NOTE(review): relu is applied both before and after fc1, mirroring
        the original implementation.
        """
        x = self.maxpool(self.relu(self.bn1(self.conv1(x))))
        x = self.layer4(self.layer3(self.layer2(self.layer1(x))))
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.relu(x)
        x = self.fc1(x)
        return self.relu(x)
The provided code snippet includes necessary dependencies for implementing the `resnet34` function. Write a Python function `def resnet34(pretrained=False, **kwargs)` to solve the following problem:
Constructs a ResNet-34 model. Args: pretrained (bool): If True, returns a model pre-trained on ImageNet
Here is the function:
def resnet34(pretrained=False, **kwargs):
    """Constructs a ResNet-34 model.

    Args:
        pretrained (bool): If True, loads ImageNet weights non-strictly so
            the custom fc head keeps its random initialization.
        **kwargs: forwarded to the ResNet constructor.

    Returns:
        ResNet: the assembled model.
    """
    model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)
    if pretrained:
        state = model_zoo.load_url(model_urls['resnet34'])
        model.load_state_dict(state, strict=False)
    return model
180,918 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch.nn as nn
import math
import torch.utils.model_zoo as model_zoo
# URLs of the official torchvision ImageNet checkpoints, keyed by
# architecture name; fetched via model_zoo.load_url when pretrained=True.
model_urls = {
    'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
    'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
    'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
    'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
    'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
class Bottleneck(nn.Module):
    """1x1 -> 3x3 -> 1x1 bottleneck residual block (ResNet-50/101/152)."""

    expansion = 4  # channel multiplier of the final 1x1 convolution

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        """Build the block.

        Args:
            inplanes (int): input channel count.
            planes (int): bottleneck width; output has planes * expansion channels.
            stride (int): stride of the middle 3x3 convolution.
            downsample (nn.Module or None): projection for the identity branch.
        """
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1,
                               bias=False)
        self.bn3 = nn.BatchNorm2d(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        """Run the three conv stages, add the identity branch, final relu."""
        identity = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        out += identity
        return self.relu(out)
class ResNet(nn.Module):
    """ResNet trunk ending in a 1024-dim feature head instead of a classifier.

    NOTE(review): `num_classes` is accepted but never used — the head is a
    fixed Linear(512 * expansion, 1024); confirm this is intentional.
    """

    def __init__(self, block, layers, num_classes=1000):
        """Assemble stem, four residual stages, average pool and fc head.

        Args:
            block (type): residual block class exposing an `expansion` attribute.
            layers (sequence of int): block counts for the four stages.
            num_classes (int): unused; kept for interface compatibility.
        """
        self.inplanes = 64  # running channel count consumed by _make_layer
        super(ResNet, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        # assumes a 7x7 spatial map here, i.e. 224x224 input — TODO confirm
        self.avgpool = nn.AvgPool2d(7, stride=1)
        self.fc1 = nn.Linear(512 * block.expansion, 1024)
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out',
                                        nonlinearity='relu')
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

    def _make_layer(self, block, planes, blocks, stride=1):
        """Stack `blocks` residual blocks, projecting the identity when needed."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )
        stages = [block(self.inplanes, planes, stride, downsample)]
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            stages.append(block(self.inplanes, planes))
        return nn.Sequential(*stages)

    def forward(self, x):
        """Return a 1024-dim non-negative feature vector per input image.

        NOTE(review): relu is applied both before and after fc1, mirroring
        the original implementation.
        """
        x = self.maxpool(self.relu(self.bn1(self.conv1(x))))
        x = self.layer4(self.layer3(self.layer2(self.layer1(x))))
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.relu(x)
        x = self.fc1(x)
        return self.relu(x)
The provided code snippet includes necessary dependencies for implementing the `resnet50` function. Write a Python function `def resnet50(pretrained=False, **kwargs)` to solve the following problem:
Constructs a ResNet-50 model. Args: pretrained (bool): If True, returns a model pre-trained on ImageNet
Here is the function:
def resnet50(pretrained=False, **kwargs):
    """Constructs a ResNet-50 model.

    Args:
        pretrained (bool): If True, loads ImageNet weights non-strictly so
            the custom fc head keeps its random initialization.
        **kwargs: forwarded to the ResNet constructor.

    Returns:
        ResNet: the assembled model.
    """
    model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
    if pretrained:
        state = model_zoo.load_url(model_urls['resnet50'])
        model.load_state_dict(state, strict=False)
    return model
180,919 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch.nn as nn
import math
import torch.utils.model_zoo as model_zoo
# URLs of the official torchvision ImageNet checkpoints, keyed by
# architecture name; fetched via model_zoo.load_url when pretrained=True.
model_urls = {
    'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
    'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
    'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
    'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
    'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
class Bottleneck(nn.Module):
    """1x1 -> 3x3 -> 1x1 bottleneck residual block (ResNet-50/101/152)."""

    expansion = 4  # channel multiplier of the final 1x1 convolution

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        """Build the block.

        Args:
            inplanes (int): input channel count.
            planes (int): bottleneck width; output has planes * expansion channels.
            stride (int): stride of the middle 3x3 convolution.
            downsample (nn.Module or None): projection for the identity branch.
        """
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1,
                               bias=False)
        self.bn3 = nn.BatchNorm2d(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        """Run the three conv stages, add the identity branch, final relu."""
        identity = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        out += identity
        return self.relu(out)
class ResNet(nn.Module):
    """ResNet trunk ending in a 1024-dim feature head instead of a classifier.

    NOTE(review): `num_classes` is accepted but never used — the head is a
    fixed Linear(512 * expansion, 1024); confirm this is intentional.
    """

    def __init__(self, block, layers, num_classes=1000):
        """Assemble stem, four residual stages, average pool and fc head.

        Args:
            block (type): residual block class exposing an `expansion` attribute.
            layers (sequence of int): block counts for the four stages.
            num_classes (int): unused; kept for interface compatibility.
        """
        self.inplanes = 64  # running channel count consumed by _make_layer
        super(ResNet, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        # assumes a 7x7 spatial map here, i.e. 224x224 input — TODO confirm
        self.avgpool = nn.AvgPool2d(7, stride=1)
        self.fc1 = nn.Linear(512 * block.expansion, 1024)
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out',
                                        nonlinearity='relu')
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

    def _make_layer(self, block, planes, blocks, stride=1):
        """Stack `blocks` residual blocks, projecting the identity when needed."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )
        stages = [block(self.inplanes, planes, stride, downsample)]
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            stages.append(block(self.inplanes, planes))
        return nn.Sequential(*stages)

    def forward(self, x):
        """Return a 1024-dim non-negative feature vector per input image.

        NOTE(review): relu is applied both before and after fc1, mirroring
        the original implementation.
        """
        x = self.maxpool(self.relu(self.bn1(self.conv1(x))))
        x = self.layer4(self.layer3(self.layer2(self.layer1(x))))
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.relu(x)
        x = self.fc1(x)
        return self.relu(x)
The provided code snippet includes necessary dependencies for implementing the `resnet101` function. Write a Python function `def resnet101(pretrained=False, **kwargs)` to solve the following problem:
Constructs a ResNet-101 model. Args: pretrained (bool): If True, returns a model pre-trained on ImageNet
Here is the function:
def resnet101(pretrained=False, **kwargs):
    """Constructs a ResNet-101 model.

    Args:
        pretrained (bool): If True, loads ImageNet weights non-strictly so
            the custom fc head keeps its random initialization.
        **kwargs: forwarded to the ResNet constructor.

    Returns:
        ResNet: the assembled model.
    """
    model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
    if pretrained:
        state = model_zoo.load_url(model_urls['resnet101'])
        model.load_state_dict(state, strict=False)
    return model
180,920 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch.nn as nn
import math
import torch.utils.model_zoo as model_zoo
# URLs of the official torchvision ImageNet checkpoints, keyed by
# architecture name; fetched via model_zoo.load_url when pretrained=True.
model_urls = {
    'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
    'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
    'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
    'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
    'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
class Bottleneck(nn.Module):
    """1x1 -> 3x3 -> 1x1 bottleneck residual block (ResNet-50/101/152)."""

    expansion = 4  # channel multiplier of the final 1x1 convolution

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        """Build the block.

        Args:
            inplanes (int): input channel count.
            planes (int): bottleneck width; output has planes * expansion channels.
            stride (int): stride of the middle 3x3 convolution.
            downsample (nn.Module or None): projection for the identity branch.
        """
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1,
                               bias=False)
        self.bn3 = nn.BatchNorm2d(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        """Run the three conv stages, add the identity branch, final relu."""
        identity = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        out += identity
        return self.relu(out)
class ResNet(nn.Module):
    """ResNet trunk ending in a 1024-dim feature head instead of a classifier.

    NOTE(review): `num_classes` is accepted but never used — the head is a
    fixed Linear(512 * expansion, 1024); confirm this is intentional.
    """

    def __init__(self, block, layers, num_classes=1000):
        """Assemble stem, four residual stages, average pool and fc head.

        Args:
            block (type): residual block class exposing an `expansion` attribute.
            layers (sequence of int): block counts for the four stages.
            num_classes (int): unused; kept for interface compatibility.
        """
        self.inplanes = 64  # running channel count consumed by _make_layer
        super(ResNet, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        # assumes a 7x7 spatial map here, i.e. 224x224 input — TODO confirm
        self.avgpool = nn.AvgPool2d(7, stride=1)
        self.fc1 = nn.Linear(512 * block.expansion, 1024)
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out',
                                        nonlinearity='relu')
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

    def _make_layer(self, block, planes, blocks, stride=1):
        """Stack `blocks` residual blocks, projecting the identity when needed."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )
        stages = [block(self.inplanes, planes, stride, downsample)]
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            stages.append(block(self.inplanes, planes))
        return nn.Sequential(*stages)

    def forward(self, x):
        """Return a 1024-dim non-negative feature vector per input image.

        NOTE(review): relu is applied both before and after fc1, mirroring
        the original implementation.
        """
        x = self.maxpool(self.relu(self.bn1(self.conv1(x))))
        x = self.layer4(self.layer3(self.layer2(self.layer1(x))))
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.relu(x)
        x = self.fc1(x)
        return self.relu(x)
The provided code snippet includes necessary dependencies for implementing the `resnet152` function. Write a Python function `def resnet152(pretrained=False, **kwargs)` to solve the following problem:
Constructs a ResNet-152 model. Args: pretrained (bool): If True, returns a model pre-trained on ImageNet
Here is the function:
def resnet152(pretrained=False, **kwargs):
    """Constructs a ResNet-152 model.

    Args:
        pretrained (bool): If True, loads ImageNet weights non-strictly so
            the custom fc head keeps its random initialization.
        **kwargs: forwarded to the ResNet constructor.

    Returns:
        ResNet: the assembled model.
    """
    model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
    if pretrained:
        state = model_zoo.load_url(model_urls['resnet152'])
        model.load_state_dict(state, strict=False)
    return model
180,921 | import ctypes
import fnmatch
import importlib
import inspect
import os
import sys
import types
import io
import pickle
import re
import requests
import html
import hashlib
import glob
import tempfile
import urllib
import urllib.request
import uuid
from typing import Any, List, Tuple, Union, Optional
from distutils.util import strtobool
import shutil
import numpy as np
# Cache directory used by the dnnlib utilities; None means "use the default".
_dnnlib_cache_dir = None

def set_cache_dir(path: str) -> None:
    """Override the directory where dnnlib stores cached/downloaded files."""
    global _dnnlib_cache_dir
    _dnnlib_cache_dir = path
180,922 | import ctypes
import fnmatch
import importlib
import inspect
import os
import sys
import types
import io
import pickle
import re
import requests
import html
import hashlib
import glob
import tempfile
import urllib
import urllib.request
import uuid
from typing import Any, List, Tuple, Union, Optional
from distutils.util import strtobool
import shutil
import numpy as np
The provided code snippet includes necessary dependencies for implementing the `format_time` function. Write a Python function `def format_time(seconds: Union[int, float]) -> str` to solve the following problem:
Convert the seconds to human readable string with days, hours, minutes and seconds.
Here is the function:
def format_time(seconds: Union[int, float]) -> str:
    """Convert the seconds to human readable string with days, hours, minutes and seconds."""
    total = int(np.rint(seconds))
    # Decompose once; each output tier reuses the relevant components.
    days, rem = divmod(total, 24 * 60 * 60)
    hours, rem = divmod(rem, 60 * 60)
    minutes, secs = divmod(rem, 60)
    if total < 60:
        return "{0}s".format(total)
    if total < 60 * 60:
        return "{0}m {1:02}s".format(minutes, secs)
    if total < 24 * 60 * 60:
        return "{0}h {1:02}m {2:02}s".format(hours, minutes, secs)
    return "{0}d {1:02}h {2:02}m".format(days, hours, minutes)
180,923 | import ctypes
import fnmatch
import importlib
import inspect
import os
import sys
import types
import io
import pickle
import re
import requests
import html
import hashlib
import glob
import tempfile
import urllib
import urllib.request
import uuid
from typing import Any, List, Tuple, Union, Optional
from distutils.util import strtobool
import shutil
import numpy as np
The provided code snippet includes necessary dependencies for implementing the `format_time_brief` function. Write a Python function `def format_time_brief(seconds: Union[int, float]) -> str` to solve the following problem:
Convert the seconds to human readable string with days, hours, minutes and seconds.
Here is the function:
def format_time_brief(seconds: Union[int, float]) -> str:
    """Convert the seconds to human readable string with days, hours, minutes and seconds.

    Like format_time but keeps only the two most significant units.
    """
    total = int(np.rint(seconds))
    if total < 60:
        return "{0}s".format(total)
    if total < 60 * 60:
        return "{0}m {1:02}s".format(total // 60, total % 60)
    if total < 24 * 60 * 60:
        return "{0}h {1:02}m".format(total // (60 * 60), (total // 60) % 60)
    return "{0}d {1:02}h".format(total // (24 * 60 * 60), (total // (60 * 60)) % 24)
180,924 | import ctypes
import fnmatch
import importlib
import inspect
import os
import sys
import types
import io
import pickle
import re
import requests
import html
import hashlib
import glob
import tempfile
import urllib
import urllib.request
import uuid
from typing import Any, List, Tuple, Union, Optional
from distutils.util import strtobool
import shutil
import numpy as np
The provided code snippet includes necessary dependencies for implementing the `ask_yes_no` function. Write a Python function `def ask_yes_no(question: str) -> bool` to solve the following problem:
Ask the user the question until the user inputs a valid answer.
Here is the function:
def ask_yes_no(question: str) -> bool:
    """Ask the user the question until the user inputs a valid answer.

    Accepts the same spellings as distutils.util.strtobool
    (y/yes/t/true/on/1 and n/no/f/false/off/0) and re-prompts on
    anything else.

    Args:
        question: text shown to the user (" [y/n]" is appended).

    Returns:
        True for an affirmative answer, False for a negative one.
    """
    # distutils (and strtobool) was removed in Python 3.12; replicate its
    # accepted spellings locally instead of importing it.
    truthy = {"y", "yes", "t", "true", "on", "1"}
    falsy = {"n", "no", "f", "false", "off", "0"}
    while True:
        print("{0} [y/n]".format(question))
        answer = input().lower()
        if answer in truthy:
            return True
        if answer in falsy:
            return False
180,925 | import ctypes
import fnmatch
import importlib
import inspect
import os
import sys
import types
import io
import pickle
import re
import requests
import html
import hashlib
import glob
import tempfile
import urllib
import urllib.request
import uuid
from typing import Any, List, Tuple, Union, Optional
from distutils.util import strtobool
import shutil
import numpy as np
The provided code snippet includes necessary dependencies for implementing the `tuple_product` function. Write a Python function `def tuple_product(t: Tuple) -> Any` to solve the following problem:
Calculate the product of the tuple elements.
Here is the function:
def tuple_product(t: Tuple) -> Any:
    """Calculate the product of the tuple elements."""
    # Running product; 1 is the multiplicative identity, so an empty
    # tuple yields 1.
    product = 1
    for factor in t:
        product = product * factor
    return product
180,926 | import ctypes
import fnmatch
import importlib
import inspect
import os
import sys
import types
import io
import pickle
import re
import requests
import html
import hashlib
import glob
import tempfile
import urllib
import urllib.request
import uuid
from typing import Any, List, Tuple, Union, Optional
from distutils.util import strtobool
import shutil
import numpy as np
# Mapping from Numpy-style type names to the ctypes type of identical width.
_str_to_ctype = {
    "uint8": ctypes.c_ubyte,
    "uint16": ctypes.c_uint16,
    "uint32": ctypes.c_uint32,
    "uint64": ctypes.c_uint64,
    "int8": ctypes.c_byte,
    "int16": ctypes.c_int16,
    "int32": ctypes.c_int32,
    "int64": ctypes.c_int64,
    "float32": ctypes.c_float,
    "float64": ctypes.c_double
}

def get_dtype_and_ctype(type_obj: Any) -> Tuple[np.dtype, Any]:
    """Given a type name string (or an object having a __name__ attribute), return matching Numpy and ctypes types that have the same size in bytes."""
    if isinstance(type_obj, str):
        type_str = type_obj
    elif hasattr(type_obj, "__name__"):
        type_str = type_obj.__name__
    elif hasattr(type_obj, "name"):
        type_str = type_obj.name
    else:
        raise RuntimeError("Cannot infer type name from input")
    assert type_str in _str_to_ctype.keys()
    dtype = np.dtype(type_str)
    ctype = _str_to_ctype[type_str]
    # Sanity check: both representations must occupy the same number of bytes.
    assert dtype.itemsize == ctypes.sizeof(ctype)
    return dtype, ctype
180,927 | import ctypes
import fnmatch
import importlib
import inspect
import os
import sys
import types
import io
import pickle
import re
import requests
import html
import hashlib
import glob
import tempfile
import urllib
import urllib.request
import uuid
from typing import Any, List, Tuple, Union, Optional
from distutils.util import strtobool
import shutil
import numpy as np
def is_pickleable(obj: Any) -> bool:
try:
with io.BytesIO() as stream:
pickle.dump(obj, stream)
return True
except:
return False | null |
180,928 | import ctypes
import fnmatch
import importlib
import inspect
import os
import sys
import types
import io
import pickle
import re
import requests
import html
import hashlib
import glob
import tempfile
import urllib
import urllib.request
import uuid
from typing import Any, List, Tuple, Union, Optional
from distutils.util import strtobool
import shutil
import numpy as np
def call_func_by_name(*args, func_name: str = None, **kwargs) -> Any:
    """Finds the python object with the given name and calls it as a function.

    Args:
        func_name: fully-qualified name of the callable (required).
        *args, **kwargs: forwarded to the resolved callable.

    Returns:
        Whatever the resolved callable returns.
    """
    assert func_name is not None
    target = get_obj_by_name(func_name)
    assert callable(target)
    return target(*args, **kwargs)
The provided code snippet includes necessary dependencies for implementing the `construct_class_by_name` function. Write a Python function `def construct_class_by_name(*args, class_name: str = None, **kwargs) -> Any` to solve the following problem:
Finds the python class with the given name and constructs it with the given arguments.
Here is the function:
def construct_class_by_name(*args, class_name: str = None, **kwargs) -> Any:
    """Finds the python class with the given name and constructs it with the given arguments.

    A class is just a callable, so instantiation reuses the generic
    function dispatcher.
    """
    return call_func_by_name(*args, func_name=class_name, **kwargs)
180,929 | import ctypes
import fnmatch
import importlib
import inspect
import os
import sys
import types
import io
import pickle
import re
import requests
import html
import hashlib
import glob
import tempfile
import urllib
import urllib.request
import uuid
from typing import Any, List, Tuple, Union, Optional
from distutils.util import strtobool
import shutil
import numpy as np
def get_module_from_obj_name(obj_name: str) -> Tuple[types.ModuleType, str]:
    """Searches for the underlying module behind the name to some python object.
    Returns the module and the object name (original name with module part removed)."""
    # Allow convenience shorthands. The dot must be escaped: the original
    # patterns "^np." / "^tf." matched ANY character after the prefix, so
    # e.g. "npx.foo" would also (wrongly) have been rewritten.
    obj_name = re.sub(r"^np\.", "numpy.", obj_name)
    obj_name = re.sub(r"^tf\.", "tensorflow.", obj_name)

    # List alternatives for (module_name, local_obj_name), longest module first.
    parts = obj_name.split(".")
    name_pairs = [(".".join(parts[:i]), ".".join(parts[i:])) for i in range(len(parts), 0, -1)]

    # Try each alternative in turn.
    for module_name, local_obj_name in name_pairs:
        try:
            module = importlib.import_module(module_name)  # may raise ImportError
            get_obj_from_module(module, local_obj_name)  # may raise AttributeError
            return module, local_obj_name
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt propagates.
            pass

    # Maybe some of the modules themselves contain errors?
    for module_name, _local_obj_name in name_pairs:
        try:
            importlib.import_module(module_name)  # may raise ImportError
        except ImportError:
            if not str(sys.exc_info()[1]).startswith("No module named '" + module_name + "'"):
                raise

    # Maybe the requested attribute is missing?
    for module_name, local_obj_name in name_pairs:
        try:
            module = importlib.import_module(module_name)  # may raise ImportError
            get_obj_from_module(module, local_obj_name)  # may raise AttributeError
        except ImportError:
            pass

    # We are out of luck, but we have no idea why.
    raise ImportError(obj_name)
The provided code snippet includes necessary dependencies for implementing the `get_module_dir_by_obj_name` function. Write a Python function `def get_module_dir_by_obj_name(obj_name: str) -> str` to solve the following problem:
Get the directory path of the module containing the given object name.
Here is the function:
def get_module_dir_by_obj_name(obj_name: str) -> str:
    """Get the directory path of the module containing the given object name."""
    containing_module, _ = get_module_from_obj_name(obj_name)
    # inspect.getfile yields the module's source path; keep its directory only.
    return os.path.dirname(inspect.getfile(containing_module))
180,930 | import ctypes
import fnmatch
import importlib
import inspect
import os
import sys
import types
import io
import pickle
import re
import requests
import html
import hashlib
import glob
import tempfile
import urllib
import urllib.request
import uuid
from typing import Any, List, Tuple, Union, Optional
from distutils.util import strtobool
import shutil
import numpy as np
def is_top_level_function(obj: Any) -> bool:
"""Determine whether the given object is a top-level function, i.e., defined at module scope using 'def'."""
return callable(obj) and obj.__name__ in sys.modules[obj.__module__].__dict__
The provided code snippet includes necessary dependencies for implementing the `get_top_level_function_name` function. Write a Python function `def get_top_level_function_name(obj: Any) -> str` to solve the following problem:
Return the fully-qualified name of a top-level function.
Here is the function:
def get_top_level_function_name(obj: Any) -> str:
"""Return the fully-qualified name of a top-level function."""
assert is_top_level_function(obj)
module = obj.__module__
if module == '__main__':
module = os.path.splitext(os.path.basename(sys.modules[module].__file__))[0]
return module + "." + obj.__name__ | Return the fully-qualified name of a top-level function. |
180,931 | import ctypes
import fnmatch
import importlib
import inspect
import os
import sys
import types
import io
import pickle
import re
import requests
import html
import hashlib
import glob
import tempfile
import urllib
import urllib.request
import uuid
from typing import Any, List, Tuple, Union, Optional
from distutils.util import strtobool
import shutil
import numpy as np
The provided code snippet includes necessary dependencies for implementing the `list_dir_recursively_with_ignore` function. Write a Python function `def list_dir_recursively_with_ignore(dir_path: str, ignores: List[str] = None, add_base_to_relative: bool = False) -> List[Tuple[str, str]]` to solve the following problem:
List all files recursively in a given directory while ignoring given file and directory names. Returns list of tuples containing both absolute and relative paths.
Here is the function:
def list_dir_recursively_with_ignore(dir_path: str, ignores: List[str] = None, add_base_to_relative: bool = False) -> List[Tuple[str, str]]:
"""List all files recursively in a given directory while ignoring given file and directory names.
Returns list of tuples containing both absolute and relative paths."""
assert os.path.isdir(dir_path)
base_name = os.path.basename(os.path.normpath(dir_path))
if ignores is None:
ignores = []
result = []
for root, dirs, files in os.walk(dir_path, topdown=True):
for ignore_ in ignores:
dirs_to_remove = [d for d in dirs if fnmatch.fnmatch(d, ignore_)]
# dirs need to be edited in-place
for d in dirs_to_remove:
dirs.remove(d)
files = [f for f in files if not fnmatch.fnmatch(f, ignore_)]
absolute_paths = [os.path.join(root, f) for f in files]
relative_paths = [os.path.relpath(p, dir_path) for p in absolute_paths]
if add_base_to_relative:
relative_paths = [os.path.join(base_name, p) for p in relative_paths]
assert len(absolute_paths) == len(relative_paths)
result += zip(absolute_paths, relative_paths)
return result | List all files recursively in a given directory while ignoring given file and directory names. Returns list of tuples containing both absolute and relative paths. |
180,932 | import ctypes
import fnmatch
import importlib
import inspect
import os
import sys
import types
import io
import pickle
import re
import requests
import html
import hashlib
import glob
import tempfile
import urllib
import urllib.request
import uuid
from typing import Any, List, Tuple, Union, Optional
from distutils.util import strtobool
import shutil
import numpy as np
The provided code snippet includes necessary dependencies for implementing the `copy_files_and_create_dirs` function. Write a Python function `def copy_files_and_create_dirs(files: List[Tuple[str, str]]) -> None` to solve the following problem:
Takes in a list of tuples of (src, dst) paths and copies files. Will create all necessary directories.
Here is the function:
def copy_files_and_create_dirs(files: List[Tuple[str, str]]) -> None:
"""Takes in a list of tuples of (src, dst) paths and copies files.
Will create all necessary directories."""
for file in files:
target_dir_name = os.path.dirname(file[1])
# will create all intermediate-level directories
if not os.path.exists(target_dir_name):
os.makedirs(target_dir_name)
shutil.copyfile(file[0], file[1]) | Takes in a list of tuples of (src, dst) paths and copies files. Will create all necessary directories. |
180,933 | import ctypes
import fnmatch
import importlib
import inspect
import os
import sys
import types
import io
import pickle
import re
import requests
import html
import hashlib
import glob
import tempfile
import urllib
import urllib.request
import uuid
from typing import Any, List, Tuple, Union, Optional
from distutils.util import strtobool
import shutil
import numpy as np
def make_cache_dir_path(*paths: str) -> str:
if _dnnlib_cache_dir is not None:
return os.path.join(_dnnlib_cache_dir, *paths)
if 'DNNLIB_CACHE_DIR' in os.environ:
return os.path.join(os.environ['DNNLIB_CACHE_DIR'], *paths)
if 'HOME' in os.environ:
return os.path.join(os.environ['HOME'], '.cache', 'dnnlib', *paths)
if 'USERPROFILE' in os.environ:
return os.path.join(os.environ['USERPROFILE'], '.cache', 'dnnlib', *paths)
return os.path.join(tempfile.gettempdir(), '.cache', 'dnnlib', *paths)
def is_url(obj: Any, allow_file_urls: bool = False) -> bool:
"""Determine whether the given object is a valid URL string."""
if not isinstance(obj, str) or not "://" in obj:
return False
if allow_file_urls and obj.startswith('file://'):
return True
try:
res = requests.compat.urlparse(obj)
if not res.scheme or not res.netloc or not "." in res.netloc:
return False
res = requests.compat.urlparse(requests.compat.urljoin(obj, "/"))
if not res.scheme or not res.netloc or not "." in res.netloc:
return False
except:
return False
return True
The provided code snippet includes necessary dependencies for implementing the `open_url` function. Write a Python function `def open_url(url: str, cache_dir: str = None, num_attempts: int = 10, verbose: bool = True, return_filename: bool = False, cache: bool = True) -> Any` to solve the following problem:
Download the given URL and return a binary-mode file object to access the data.
Here is the function:
def open_url(url: str, cache_dir: str = None, num_attempts: int = 10, verbose: bool = True, return_filename: bool = False, cache: bool = True) -> Any:
"""Download the given URL and return a binary-mode file object to access the data."""
assert num_attempts >= 1
assert not (return_filename and (not cache))
# Doesn't look like an URL scheme so interpret it as a local filename.
if not re.match('^[a-z]+://', url):
return url if return_filename else open(url, "rb")
# Handle file URLs. This code handles unusual file:// patterns that
# arise on Windows:
#
# file:///c:/foo.txt
#
# which would translate to a local '/c:/foo.txt' filename that's
# invalid. Drop the forward slash for such pathnames.
#
# If you touch this code path, you should test it on both Linux and
# Windows.
#
# Some internet resources suggest using urllib.request.url2pathname() but
# but that converts forward slashes to backslashes and this causes
# its own set of problems.
if url.startswith('file://'):
filename = urllib.parse.urlparse(url).path
if re.match(r'^/[a-zA-Z]:', filename):
filename = filename[1:]
return filename if return_filename else open(filename, "rb")
assert is_url(url)
# Lookup from cache.
if cache_dir is None:
cache_dir = make_cache_dir_path('downloads')
url_md5 = hashlib.md5(url.encode("utf-8")).hexdigest()
if cache:
cache_files = glob.glob(os.path.join(cache_dir, url_md5 + "_*"))
if len(cache_files) == 1:
filename = cache_files[0]
return filename if return_filename else open(filename, "rb")
# Download.
url_name = None
url_data = None
with requests.Session() as session:
if verbose:
print("Downloading %s ..." % url, end="", flush=True)
for attempts_left in reversed(range(num_attempts)):
try:
with session.get(url) as res:
res.raise_for_status()
if len(res.content) == 0:
raise IOError("No data received")
if len(res.content) < 8192:
content_str = res.content.decode("utf-8")
if "download_warning" in res.headers.get("Set-Cookie", ""):
links = [html.unescape(link) for link in content_str.split('"') if "export=download" in link]
if len(links) == 1:
url = requests.compat.urljoin(url, links[0])
raise IOError("Google Drive virus checker nag")
if "Google Drive - Quota exceeded" in content_str:
raise IOError("Google Drive download quota exceeded -- please try again later")
match = re.search(r'filename="([^"]*)"', res.headers.get("Content-Disposition", ""))
url_name = match[1] if match else url
url_data = res.content
if verbose:
print(" done")
break
except KeyboardInterrupt:
raise
except:
if not attempts_left:
if verbose:
print(" failed")
raise
if verbose:
print(".", end="", flush=True)
# Save to cache.
if cache:
safe_name = re.sub(r"[^0-9a-zA-Z-._]", "_", url_name)
safe_name = safe_name[:min(len(safe_name), 128)]
cache_file = os.path.join(cache_dir, url_md5 + "_" + safe_name)
temp_file = os.path.join(cache_dir, "tmp_" + uuid.uuid4().hex + "_" + url_md5 + "_" + safe_name)
os.makedirs(cache_dir, exist_ok=True)
with open(temp_file, "wb") as f:
f.write(url_data)
os.replace(temp_file, cache_file) # atomic
if return_filename:
return cache_file
# Return data as file object.
assert not return_filename
return io.BytesIO(url_data) | Download the given URL and return a binary-mode file object to access the data. |
180,934 | import os
import re
import json
from pathlib import Path
from typing import Union
from glob import glob
import torch
import click
import dnnlib
from training import training_loop
from metrics import metric_main
from torch_utils import distributed as dist
from torch_utils import custom_ops
def parse_comma_separated_list(s: Union[None, str, list]) -> Union[list, str]:
if isinstance(s, list):
return s
if s is None or s.lower() == 'none' or s == '':
return []
return s.split(',') | null |
180,935 | import os
import re
import json
from pathlib import Path
from typing import Union
from glob import glob
import torch
import click
import dnnlib
from training import training_loop
from metrics import metric_main
from torch_utils import distributed as dist
from torch_utils import custom_ops
def is_power_of_two(n: int) -> bool:
def init_dataset_kwargs(data: str, resolution: int) -> dnnlib.EasyDict:
d_kwargs = dnnlib.EasyDict(path=data, xflip=False, use_labels=True)
is_wds = len(glob(f'{data}/**/*.tar')) > 0 # check if files are tars, then it's a webdataset
if is_wds:
assert resolution, "Provide desired resolution when training on webdatasets."
d_kwargs.class_name = 'training.data_wds.WdsWrapper'
else:
d_kwargs.class_name = 'training.data_zip.ImageFolderDataset'
dataset_obj = dnnlib.util.construct_class_by_name(**d_kwargs) # Subclass of training.dataset.Dataset.
assert resolution <= dataset_obj._raw_shape[-1], f"Native dataset resolution is smaller than {resolution}"
assert is_power_of_two(resolution)
d_kwargs.resolution = resolution
return d_kwargs | null |
180,936 | import os
import time
import json
from pathlib import Path
from typing import Optional, Callable
import torch
import dnnlib
from metrics import metric_utils
from metrics import frechet_inception_distance
from metrics import precision_recall
from metrics import clip_score
_metric_dict = dict()
def register_metric(fn: Callable) -> Callable:
assert callable(fn)
_metric_dict[fn.__name__] = fn
return fn | null |
180,937 | import os
import time
import json
from pathlib import Path
from typing import Optional, Callable
import torch
import dnnlib
from metrics import metric_utils
from metrics import frechet_inception_distance
from metrics import precision_recall
from metrics import clip_score
def fid50k_full(opts):
opts.dataset_kwargs.update(max_size=None, xflip=False)
is_imfolder = opts.dataset_kwargs.class_name == 'training.data_zip.ImageFolderDataset'
assert is_imfolder, f'Calculating metrics on {opts.dataset_kwargs.class_name} are not supported'
fid = frechet_inception_distance.compute_fid(opts, max_real=None, num_gen=50000)
return dict(fid50k_full=fid) | null |
180,938 | import os
import time
import json
from pathlib import Path
from typing import Optional, Callable
import torch
import dnnlib
from metrics import metric_utils
from metrics import frechet_inception_distance
from metrics import precision_recall
from metrics import clip_score
def fid10k_full(opts):
opts.dataset_kwargs.update(max_size=None, xflip=False)
is_imfolder = opts.dataset_kwargs.class_name == 'training.data_zip.ImageFolderDataset'
assert is_imfolder, f'Calculating metrics on {opts.dataset_kwargs.class_name} are not supported'
fid = frechet_inception_distance.compute_fid(opts, max_real=None, num_gen=10000)
return dict(fid10k_full=fid) | null |
180,939 | import os
import time
import json
from pathlib import Path
from typing import Optional, Callable
import torch
import dnnlib
from metrics import metric_utils
from metrics import frechet_inception_distance
from metrics import precision_recall
from metrics import clip_score
def cs10k(opts):
assert opts.G.c_dim > 1, 'CLIP score only works for conditional generators.'
opts.dataset_kwargs.update(
class_name='training.data_zip.ImageFolderDataset',
max_size=None, xflip=False,
)
cs = clip_score.compute_clip_score(opts, num_gen=10000)
return dict(cs=cs) | null |
180,940 | import os
import time
import json
from pathlib import Path
from typing import Optional, Callable
import torch
import dnnlib
from metrics import metric_utils
from metrics import frechet_inception_distance
from metrics import precision_recall
from metrics import clip_score
def pr50k3_full(opts):
opts.dataset_kwargs.update(max_size=None, xflip=False)
is_imfolder = opts.dataset_kwargs.class_name == 'training.data_zip.ImageFolderDataset'
assert is_imfolder, f'Calculating metrics on {opts.dataset_kwargs.class_name} are not supported'
precision, recall = precision_recall.compute_pr(opts, max_real=200000, num_gen=50000, nhood_size=3, row_batch_size=10000, col_batch_size=10000)
return dict(pr50k3_full_precision=precision, pr50k3_full_recall=recall) | null |
180,941 | import os
import time
import json
from pathlib import Path
from typing import Optional, Callable
import torch
import dnnlib
from metrics import metric_utils
from metrics import frechet_inception_distance
from metrics import precision_recall
from metrics import clip_score
def get_coco_path(original_path: str) -> str:
# Check if coco path was already provided.
if Path(original_path).stem == 'coco_val256':
return original_path
# Check if coco.zip is in data folder of the current dataset.
elif (Path(original_path).parent / 'coco_val256.zip').exists():
return str(Path(original_path).parent / 'coco_val256.zip')
# Check if coco.zip in ENV.
else:
path = ''
if 'COCOPATH' in os.environ:
path = os.environ["COCOPATH"]
if Path(path).stem == 'coco_val256':
return path
else:
raise ValueError(f'Did not find coco_val256. $COCOPATH: {path}')
def fid30k_coco64(opts):
coco_path = get_coco_path(opts.dataset_kwargs.path)
opts.dataset_kwargs.update(
class_name="training.data_zip.ImageFolderDataset",
path=coco_path, resolution=64, max_size=None, xflip=False,
)
fid = frechet_inception_distance.compute_fid(opts, max_real=None, num_gen=30000)
return dict(fid30k_full_coco_val=fid) | null |
180,942 | import os
import time
import json
from pathlib import Path
from typing import Optional, Callable
import torch
import dnnlib
from metrics import metric_utils
from metrics import frechet_inception_distance
from metrics import precision_recall
from metrics import clip_score
def get_coco_path(original_path: str) -> str:
# Check if coco path was already provided.
if Path(original_path).stem == 'coco_val256':
return original_path
# Check if coco.zip is in data folder of the current dataset.
elif (Path(original_path).parent / 'coco_val256.zip').exists():
return str(Path(original_path).parent / 'coco_val256.zip')
# Check if coco.zip in ENV.
else:
path = ''
if 'COCOPATH' in os.environ:
path = os.environ["COCOPATH"]
if Path(path).stem == 'coco_val256':
return path
else:
raise ValueError(f'Did not find coco_val256. $COCOPATH: {path}')
def fid30k_coco256(opts):
coco_path = get_coco_path(opts.dataset_kwargs.path)
opts.dataset_kwargs.update(
class_name="training.data_zip.ImageFolderDataset",
path=coco_path, resolution=256, max_size=None, xflip=False,
)
fid = frechet_inception_distance.compute_fid(opts, max_real=None, num_gen=30000)
return dict(fid30k_full_coco_val=fid) | null |
180,943 | import os
import time
import json
from pathlib import Path
from typing import Optional, Callable
import torch
import dnnlib
from metrics import metric_utils
from metrics import frechet_inception_distance
from metrics import precision_recall
from metrics import clip_score
def get_coco_path(original_path: str) -> str:
# Check if coco path was already provided.
if Path(original_path).stem == 'coco_val256':
return original_path
# Check if coco.zip is in data folder of the current dataset.
elif (Path(original_path).parent / 'coco_val256.zip').exists():
return str(Path(original_path).parent / 'coco_val256.zip')
# Check if coco.zip in ENV.
else:
path = ''
if 'COCOPATH' in os.environ:
path = os.environ["COCOPATH"]
if Path(path).stem == 'coco_val256':
return path
else:
raise ValueError(f'Did not find coco_val256. $COCOPATH: {path}')
def cs10k_coco(opts):
coco_path = get_coco_path(opts.dataset_kwargs.path)
assert opts.G.c_dim > 1, 'CLIP score only works for conditional generators.'
opts.dataset_kwargs.update(
class_name='training.data_zip.ImageFolderDataset',
path=coco_path, max_size=None, xflip=False,
)
cs = clip_score.compute_clip_score(opts, num_gen=30000)
return dict(cs=cs) | null |
180,944 | import os
import json
import copy
import torch
import dill
import click
import dnnlib
from metrics import metric_main
from metrics import metric_utils
from torch_utils import misc
from torch_utils import custom_ops
from torch_utils import distributed as dist
from torch_utils.ops import conv2d_gradfix
def parse_comma_separated_list(s):
if isinstance(s, list):
return s
if s is None or s.lower() == 'none' or s == '':
return []
return s.split(',') | null |
180,945 | import os
import json
import copy
import torch
import dill
import click
import dnnlib
from metrics import metric_main
from metrics import metric_utils
from torch_utils import misc
from torch_utils import custom_ops
from torch_utils import distributed as dist
from torch_utils.ops import conv2d_gradfix
The provided code snippet includes necessary dependencies for implementing the `calc_metrics` function. Write a Python function `def calc_metrics( ctx, network_pkl: str, metrics: list, data: str, mirror: bool, truncation: float, )` to solve the following problem:
Calculate quality metrics for previous training run or pretrained network pickle. Examples: \b # Previous training run: look up options automatically, save result to JSONL file. python calc_metrics.py --metrics=cs10k,fid50k_full \\ --network=~/training-runs/00000-mydataset@512-custom-gpus1-b4-bgpu2/network-snapshot-000000.pkl \b # Pre-trained network pickle: specify dataset explicitly, print result to stdout. python calc_metrics.py --metrics=fid50k_full --data=~/datasets/mydataset.zip --mirror=1 \\ --network=~/training-runs/00000-mydataset@512-custom-gpus1-b4-bgpu2/network-snapshot-000000.pkl \b General metrics: fid50k_full Frechet inception distance against the full dataset (50k generated samples). fid10k_full Frechet inception distance against the full dataset (10k generated samples). cs10k Clip score (10k generated samples). pr50k3_full Precision and recall againt the full dataset (50k generated samples, neighborhood size=3). \b Zero-shot COCO metrics: fid30k_coco64 Frechet inception distance against the COCO validation set (30k generated samples). fid30k_coco256 Frechet inception distance against the COCO validation set (30k generated samples). cs10k_coco Clip score on the COCO validation set (10k generated samples).
Here is the function:
def calc_metrics(
ctx,
network_pkl: str,
metrics: list,
data: str,
mirror: bool,
truncation: float,
):
"""Calculate quality metrics for previous training run or pretrained network pickle.
Examples:
\b
# Previous training run: look up options automatically, save result to JSONL file.
python calc_metrics.py --metrics=cs10k,fid50k_full \\
--network=~/training-runs/00000-mydataset@512-custom-gpus1-b4-bgpu2/network-snapshot-000000.pkl
\b
# Pre-trained network pickle: specify dataset explicitly, print result to stdout.
python calc_metrics.py --metrics=fid50k_full --data=~/datasets/mydataset.zip --mirror=1 \\
--network=~/training-runs/00000-mydataset@512-custom-gpus1-b4-bgpu2/network-snapshot-000000.pkl
\b
General metrics:
fid50k_full Frechet inception distance against the full dataset (50k generated samples).
fid10k_full Frechet inception distance against the full dataset (10k generated samples).
cs10k Clip score (10k generated samples).
pr50k3_full Precision and recall againt the full dataset (50k generated samples, neighborhood size=3).
\b
Zero-shot COCO metrics:
fid30k_coco64 Frechet inception distance against the COCO validation set (30k generated samples).
fid30k_coco256 Frechet inception distance against the COCO validation set (30k generated samples).
cs10k_coco Clip score on the COCO validation set (10k generated samples).
"""
# Init distributed
torch.multiprocessing.set_start_method('spawn')
dist.init()
device = torch.device('cuda')
# Validate arguments.
G_kwargs=dnnlib.EasyDict(truncation_psi=truncation)
if not all(metric_main.is_valid_metric(metric) for metric in metrics):
err = ['--metrics can only contain the following values:'] + metric_main.list_valid_metrics()
ctx.fail('\n'.join(err))
# Load network.
if not dnnlib.util.is_url(network_pkl, allow_file_urls=True) and not os.path.isfile(network_pkl):
ctx.fail('--network must point to a file or URL')
dist.print0(f'Loading network from "{network_pkl}"...')
with dnnlib.util.open_url(network_pkl, verbose=True) as f:
network_dict = dill.load(f)
G = network_dict['G_ema'] # subclass of torch.nn.Module
# Initialize dataset options.
if data is not None:
dataset_kwargs = dnnlib.EasyDict(class_name='training.data_zip.ImageFolderDataset', path=data)
elif network_dict.get('training_set_kwargs') is not None:
dataset_kwargs = dnnlib.EasyDict(network_dict['training_set_kwargs'])
else:
ctx.fail('Could not look up dataset options; please specify --data')
# Finalize dataset options.
dataset_kwargs.resolution = G.img_resolution
dataset_kwargs.use_labels = (G.c_dim != 0)
if mirror is not None:
dataset_kwargs.xflip = mirror
# Print dataset options.
dist.print0('Dataset options:')
dist.print0(json.dumps(dataset_kwargs, indent=2))
# Locate run dir.
run_dir = None
if os.path.isfile(network_pkl):
pkl_dir = os.path.dirname(network_pkl)
if os.path.isfile(os.path.join(pkl_dir, 'training_options.json')):
run_dir = pkl_dir
# Launch processes.
dist.print0('Launching processes...')
dnnlib.util.Logger(should_flush=True)
if dist.get_rank() != 0:
custom_ops.verbosity = 'none'
# Configure torch.
torch.backends.cuda.matmul.allow_tf32 = False
torch.backends.cudnn.allow_tf32 = False
conv2d_gradfix.enabled = True
# Print network summary.
G = copy.deepcopy(G).eval().requires_grad_(False).to(device)
if dist.get_rank() == 0:
z = torch.empty([1, G.z_dim], device=device)
c = torch.empty([1, G.c_dim], device=device)
misc.print_module_summary(G, [z, c])
# Calculate each metric.
for metric in metrics:
dist.print0(f'Calculating {metric}...')
progress = metric_utils.ProgressMonitor(verbose=True)
result_dict = metric_main.calc_metric(metric=metric, G=G, G_kwargs=G_kwargs, dataset_kwargs=dataset_kwargs,
num_gpus=dist.get_world_size(), rank=dist.get_rank(), device=device, progress=progress)
if dist.get_rank() == 0:
metric_main.report_metric(result_dict, run_dir=run_dir, snapshot_pkl=network_pkl)
dist.print0()
# Done.
dist.print0('Exiting...') | Calculate quality metrics for previous training run or pretrained network pickle. Examples: \b # Previous training run: look up options automatically, save result to JSONL file. python calc_metrics.py --metrics=cs10k,fid50k_full \\ --network=~/training-runs/00000-mydataset@512-custom-gpus1-b4-bgpu2/network-snapshot-000000.pkl \b # Pre-trained network pickle: specify dataset explicitly, print result to stdout. python calc_metrics.py --metrics=fid50k_full --data=~/datasets/mydataset.zip --mirror=1 \\ --network=~/training-runs/00000-mydataset@512-custom-gpus1-b4-bgpu2/network-snapshot-000000.pkl \b General metrics: fid50k_full Frechet inception distance against the full dataset (50k generated samples). fid10k_full Frechet inception distance against the full dataset (10k generated samples). cs10k Clip score (10k generated samples). pr50k3_full Precision and recall againt the full dataset (50k generated samples, neighborhood size=3). \b Zero-shot COCO metrics: fid30k_coco64 Frechet inception distance against the COCO validation set (30k generated samples). fid30k_coco256 Frechet inception distance against the COCO validation set (30k generated samples). cs10k_coco Clip score on the COCO validation set (10k generated samples). |
180,946 | import os
import re
from typing import List, Optional, Union
import PIL.Image
import numpy as np
import torch
import click
import dill
from tqdm import tqdm
import dnnlib
The provided code snippet includes necessary dependencies for implementing the `parse_range` function. Write a Python function `def parse_range(s: Union[str, List]) -> List[int]` to solve the following problem:
Parse a comma separated list of numbers or ranges and return a list of ints. Example: '1,2,5-10' returns [1, 2, 5, 6, 7]
Here is the function:
def parse_range(s: Union[str, List]) -> List[int]:
'''Parse a comma separated list of numbers or ranges and return a list of ints.
Example: '1,2,5-10' returns [1, 2, 5, 6, 7]
'''
if isinstance(s, list): return s
ranges = []
range_re = re.compile(r'^(\d+)-(\d+)$')
for p in s.split(','):
m = range_re.match(p)
if m:
ranges.extend(range(int(m.group(1)), int(m.group(2))+1))
else:
ranges.append(int(p))
return ranges | Parse a comma separated list of numbers or ranges and return a list of ints. Example: '1,2,5-10' returns [1, 2, 5, 6, 7] |
180,947 | import os
import re
from typing import List, Optional, Union
import PIL.Image
import numpy as np
import torch
import click
import dill
from tqdm import tqdm
import dnnlib
The provided code snippet includes necessary dependencies for implementing the `parse_vec2` function. Write a Python function `def parse_vec2(s: Union[str, tuple[float, float]]) -> tuple[float, float]` to solve the following problem:
Parse a floating point 2-vector of syntax 'a,b'. Example: '0,1' returns (0,1)
Here is the function:
def parse_vec2(s: Union[str, tuple[float, float]]) -> tuple[float, float]:
'''Parse a floating point 2-vector of syntax 'a,b'.
Example:
'0,1' returns (0,1)
'''
if isinstance(s, tuple): return s
parts = s.split(',')
if len(parts) == 2:
return (float(parts[0]), float(parts[1]))
raise ValueError(f'cannot parse 2-vector {s}') | Parse a floating point 2-vector of syntax 'a,b'. Example: '0,1' returns (0,1) |
180,948 | import os
import re
from typing import List, Optional, Union
import PIL.Image
import numpy as np
import torch
import click
import dill
from tqdm import tqdm
import dnnlib
def make_transform(translate: tuple[float,float], angle: float) -> np.ndarray:
def generate_images(
network_pkl: str,
seeds: List[int],
prompt: Optional[str],
outdir: str,
truncation: float,
noise_mode: str,
translate: tuple[float,float],
rotate: float,
device: torch.device,
) -> None:
print(f'Loading networks from "{network_pkl}"...')
with dnnlib.util.open_url(network_pkl) as f:
G = dill.load(f)['G_ema']
G = G.eval().requires_grad_(False).to(device)
if G.c_dim > 1:
assert prompt, "Provide a prompt for conditional generators."
os.makedirs(outdir, exist_ok=True)
for seed in tqdm(seeds):
# Construct an inverse rotation/translation matrix and pass to the generator. The
# generator expects this matrix as an inverse to avoid potentially failing numerical
# operations in the network.
if hasattr(G.synthesis, 'input'):
m = make_transform(translate, rotate)
m = np.linalg.inv(m)
G.synthesis.input.transform.copy_(torch.from_numpy(m))
# Generate
z = np.random.RandomState(seed).randn(1, G.z_dim)
z = torch.from_numpy(z).float().to(device)
w = G.mapping(z, [prompt]*len(z), truncation_psi=truncation)
img = G.synthesis(w, noise_mode=noise_mode)
# Save
img = (img.permute(0, 2, 3, 1) * 127.5 + 128).clamp(0, 255).to(torch.uint8)
PIL.Image.fromarray(img[0].cpu().numpy(), 'RGB').save(f'{outdir}/seed{seed:04d}.png') | null |
180,949 | import numpy as np
import torch
import torch.nn.functional as F
AUGMENT_FNS = {
'color': [rand_brightness, rand_saturation, rand_contrast],
'translation': [rand_translation],
'resize': [rand_resize],
'cutout': [rand_cutout],
}
def DiffAugment(x: torch.Tensor, policy: str = '', channels_first: bool = True) -> torch.Tensor:
if policy:
if not channels_first:
x = x.permute(0, 3, 1, 2)
for p in policy.split(','):
for f in AUGMENT_FNS[p]:
x = f(x)
if not channels_first:
x = x.permute(0, 2, 3, 1)
x = x.contiguous()
return x | null |
180,950 | import numpy as np
import torch
import torch.nn.functional as F
def rand_brightness(x: torch.Tensor) -> torch.Tensor:
x = x + (torch.rand(x.size(0), 1, 1, 1, dtype=x.dtype, device=x.device) - 0.5)
return x | null |
180,951 | import numpy as np
import torch
import torch.nn.functional as F
def rand_saturation(x: torch.Tensor) -> torch.Tensor:
x_mean = x.mean(dim=1, keepdim=True)
x = (x - x_mean) * (torch.rand(x.size(0), 1, 1, 1, dtype=x.dtype, device=x.device) * 2) + x_mean
return x | null |
180,952 | import numpy as np
import torch
import torch.nn.functional as F
def rand_contrast(x: torch.Tensor) -> torch.Tensor:
x_mean = x.mean(dim=[1, 2, 3], keepdim=True)
x = (x - x_mean) * (torch.rand(x.size(0), 1, 1, 1, dtype=x.dtype, device=x.device) + 0.5) + x_mean
return x | null |
180,953 | import numpy as np
import torch
import torch.nn.functional as F
def rand_translation(x: torch.Tensor, ratio: float = 0.125) -> torch.Tensor:
shift_x, shift_y = int(x.size(2) * ratio + 0.5), int(x.size(3) * ratio + 0.5)
translation_x = torch.randint(-shift_x, shift_x + 1, size=[x.size(0), 1, 1], device=x.device)
translation_y = torch.randint(-shift_y, shift_y + 1, size=[x.size(0), 1, 1], device=x.device)
grid_batch, grid_x, grid_y = torch.meshgrid(
torch.arange(x.size(0), dtype=torch.long, device=x.device),
torch.arange(x.size(2), dtype=torch.long, device=x.device),
torch.arange(x.size(3), dtype=torch.long, device=x.device),
)
grid_x = torch.clamp(grid_x + translation_x + 1, 0, x.size(2) + 1)
grid_y = torch.clamp(grid_y + translation_y + 1, 0, x.size(3) + 1)
x_pad = F.pad(x, [1, 1, 1, 1, 0, 0, 0, 0])
x = x_pad.permute(0, 2, 3, 1).contiguous()[grid_batch, grid_x, grid_y].permute(0, 3, 1, 2)
return x | null |
180,954 | import numpy as np
import torch
import torch.nn.functional as F
def rand_cutout(x: torch.Tensor, ratio: float = 0.2) -> torch.Tensor:
cutout_size = int(x.size(2) * ratio + 0.5), int(x.size(3) * ratio + 0.5)
offset_x = torch.randint(0, x.size(2) + (1 - cutout_size[0] % 2), size=[x.size(0), 1, 1], device=x.device)
offset_y = torch.randint(0, x.size(3) + (1 - cutout_size[1] % 2), size=[x.size(0), 1, 1], device=x.device)
grid_batch, grid_x, grid_y = torch.meshgrid(
torch.arange(x.size(0), dtype=torch.long, device=x.device),
torch.arange(cutout_size[0], dtype=torch.long, device=x.device),
torch.arange(cutout_size[1], dtype=torch.long, device=x.device),
)
grid_x = torch.clamp(grid_x + offset_x - cutout_size[0] // 2, min=0, max=x.size(2) - 1)
grid_y = torch.clamp(grid_y + offset_y - cutout_size[1] // 2, min=0, max=x.size(3) - 1)
mask = torch.ones(x.size(0), x.size(2), x.size(3), dtype=x.dtype, device=x.device)
mask[grid_batch, grid_x, grid_y] = 0
x = x * mask.unsqueeze(1)
return x | null |
180,955 | import numpy as np
import torch
import torch.nn.functional as F
def rand_resize(x: torch.Tensor, min_ratio: float = 0.8, max_ratio: float = 1.2) -> torch.Tensor:
    """Rescale the (square) input by a random factor in [min_ratio, max_ratio],
    then center-pad (if shrunk) or center-crop (if grown) back to the original size."""
    org_size = x.shape[3]
    ratio = np.random.rand() * (max_ratio - min_ratio) + min_ratio
    new_size = int(ratio * org_size)
    resized = F.interpolate(x, size=new_size, mode='bilinear')
    if new_size < org_size:
        # Shrunk: center the resized image on a zero canvas of the original size.
        pad_lo = int((org_size - new_size) / 2.)
        pad_hi = org_size - pad_lo - resized.shape[3]
        out = F.pad(resized, (pad_lo, pad_hi, pad_lo, pad_hi), "constant", 0.)
    else:
        # Grown (or unchanged): take a centered crop of the original size.
        off = int((new_size - org_size) / 2.)
        out = resized[:, :, off:(off + org_size), off:(off + org_size)]
    assert out.shape[2] == org_size
    assert out.shape[3] == org_size
    return out
180,956 | import os
import math
import time
import copy
import json
import PIL.Image
from typing import Union, Iterator, Optional, Any
import dill
import psutil
import numpy as np
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.utils.tensorboard as tensorboard
import dnnlib
from torch_utils import training_stats
from torch_utils import misc
from torch_utils import distributed as dist
from torch_utils.ops import conv2d_gradfix
from metrics import metric_main
from training.data_wds import wds_dataloader
from docs.prompts import PROMPTS_TRAINING
def setup_snapshot_image_grid(
    training_set: Any,
    random_seed: int = 0,
    gw: Optional[int] = None,
    gh: Optional[int] = None,
) -> tuple[tuple[int,int], np.ndarray, np.ndarray]:
    """Select training-set samples to show in a fixed snapshot grid.

    Args:
        training_set: Dataset object; must expose image_shape, has_labels,
            labels_are_text, get_details() and indexing -> (image, label).
        random_seed: Seed for the sample-selection RNG.
        gw: Grid width in images; default fits a 7680-px-wide canvas, clamped to [7, 32].
        gh: Grid height in images; default fits a 4320-px-tall canvas, clamped to [4, 32].

    Returns:
        ((gw, gh), images, labels) with gw*gh stacked entries.

    NOTE(review): `raw_label.flat[::-1]` reverses the flattened label before
    grouping — presumably to key one-hot labels; confirm against get_details().
    """
    rnd = np.random.RandomState(random_seed)
    if gw is None:
        gw = np.clip(7680 // training_set.image_shape[2], 7, 32)
    if gh is None:
        gh = np.clip(4320 // training_set.image_shape[1], 4, 32)
    # No labels => show random subset of training samples.
    if not training_set.has_labels:
        all_indices = list(range(len(training_set)))
        rnd.shuffle(all_indices)
        grid_indices = [all_indices[i % len(all_indices)] for i in range(gw * gh)]
    # Text labels => show samples in dataset order (no shuffling).
    elif training_set.labels_are_text:
        all_indices = list(range(len(training_set)))
        grid_indices = [all_indices[i % len(all_indices)] for i in range(gw * gh)]
    else:
        # Group training samples by label.
        label_groups = dict() # label => [idx, ...]
        for idx in range(len(training_set)):
            label = tuple(training_set.get_details(idx).raw_label.flat[::-1])
            if label not in label_groups:
                label_groups[label] = []
            label_groups[label].append(idx)
        # Reorder.
        label_order = sorted(label_groups.keys())
        for label in label_order:
            rnd.shuffle(label_groups[label])
        # Organize into grid: one label per row, rotating each label's index
        # list by gw so repeated rows of the same label show different samples.
        grid_indices = []
        for y in range(gh):
            label = label_order[y % len(label_order)]
            indices = label_groups[label]
            grid_indices += [indices[x % len(indices)] for x in range(gw)]
            label_groups[label] = [indices[(i + gw) % len(indices)] for i in range(len(indices))]
    # Load data.
    images, labels = zip(*[training_set[i] for i in grid_indices])
    return (gw, gh), np.stack(images), np.stack(labels)
def save_image_grid(
    img: Union[torch.Tensor, np.ndarray],
    drange: tuple[float, float],
    grid_size: tuple[int, int],
    fname: str = '',
) -> Optional[np.ndarray]:
    """Arrange a batch of images into a single uint8 grid image.

    Args:
        img: Batch of images, shape [N, C, H, W]. May be a CPU torch tensor or
            a numpy array (both occur at the call sites); the original hint
            claimed torch.Tensor only, which was wrong.
        drange: (lo, hi) dynamic range of the input, mapped linearly to [0, 255].
        grid_size: (gw, gh) grid width/height in images; N must equal gw * gh
            (gh may be -1 to infer the row count from N).
        fname: If non-empty, save the grid to this path and return None;
            otherwise return the assembled grid.

    Returns:
        np.uint8 array of shape [gh*H, gw*W, C] when fname is empty, else None.
    """
    lo, hi = drange
    img = np.asarray(img, dtype=np.float32)
    # Rescale [lo, hi] -> [0, 255] and quantize.
    img = (img - lo) * (255 / (hi - lo))
    img = np.rint(img).clip(0, 255).astype(np.uint8)
    gw, gh = grid_size
    _N, C, H, W = img.shape
    # Tile images row-major: [N, C, H, W] -> [gh*H, gw*W, C].
    img = img.reshape([gh, gw, C, H, W])
    img = img.transpose(0, 3, 1, 4, 2)
    img = img.reshape([gh * H, gw * W, C])
    # Save or return.
    if fname:
        if C == 3:
            PIL.Image.fromarray(img, 'RGB').save(fname)
        else:
            # Non-RGB: save the first channel as an 8-bit grayscale image.
            PIL.Image.fromarray(img[:, :, 0], 'L').save(fname)
    else:
        return img
def save_samples(
    G_ema: nn.Module,
    run_dir: str,
    device: torch.device,
    suffix: str,
    prompts: list[str] = PROMPTS_TRAINING,
    n_samples: int = 5,
) -> None:
    """Render and save a grid of EMA-generator samples.

    Conditional models (c_dim > 0): for each prompt, renders n_samples images
    at truncation_psi 1.0 and 0.0 and saves the per-prompt grids via
    save_prompts_dict (defined elsewhere in this module — confirm its layout).
    Unconditional models: saves one n_samples x n_samples grid directly.

    Latents come from a fixed seed (0) so successive snapshots are comparable.
    """
    save_path = os.path.join(run_dir, f"samples_{suffix}.png")
    if G_ema.c_dim > 0:
        # Fixed latents shared by all prompts.
        z = np.random.RandomState(0).randn(n_samples, G_ema.z_dim);
        z = torch.from_numpy(z).float().to(device)
        prompts_results = dict()
        for prompt in prompts:
            # Two W batches per prompt, at truncation_psi 1.0 and 0.0.
            ws = [
                G_ema.mapping(z, [prompt]*len(z), truncation_psi=1.0),
                G_ema.mapping(z, [prompt]*len(z), truncation_psi=0.0),
            ]
            imgs = []
            for wi in ws:
                imgs.append(G_ema.synthesis(wi, noise_mode="const").cpu())
            # grid height -1 lets save_image_grid infer the row count via reshape.
            prompts_results[prompt] = save_image_grid(torch.cat(imgs), drange=[-1, 1], grid_size=(n_samples, -1))
        save_prompts_dict(prompts_results, path=save_path)
    else:
        z = np.random.RandomState(0).randn(n_samples**2, G_ema.z_dim)
        z = torch.from_numpy(z).float().to(device)
        imgs = G_ema(z, c=None, noise_mode="const").cpu().numpy()
        save_image_grid(imgs, drange=[-1,1], grid_size=(n_samples, n_samples), fname=save_path)
def network_summaries(G: nn.Module, D: nn.Module, device: torch.device) -> None:
    """Print module summary tables for G and D by tracing them on dummy inputs."""
    latent = torch.randn([1, G.z_dim], device=device)
    label = torch.randn([1, G.c_dim], device=device)
    # Run G once so its output can serve as D's dummy input.
    fake = misc.print_module_summary(G, [latent, label])
    misc.print_module_summary(D, [fake, label])
def sync_grads(network: nn.Module, gain: Optional[int] = None) -> None:
    """Average gradients across ranks (one flattened all-reduce), optionally
    scale them by `gain`, sanitize non-finite values, and scatter back."""
    with_grad = [p for p in network.parameters() if p.grad is not None]
    flat = torch.cat([p.grad.flatten() for p in with_grad])
    flat = sharded_all_mean(flat)
    if gain is not None:
        flat = flat * gain
    # Replace NaN/inf in place before the optimizer sees them.
    torch.nan_to_num(flat, nan=0, posinf=1e5, neginf=-1e5, out=flat)
    pieces = flat.split([p.numel() for p in with_grad])
    for p, g in zip(with_grad, pieces):
        p.grad = g.reshape(p.size())
def fetch_data(
    training_set_iterator: Iterator,
    z_dim: int,
    device: torch.device,
    batches_num: int,
    batch_size: int,
    batch_gpu: int
) -> tuple[Any, Any, list]:
    """Fetch one batch of real data and sample latents for every training phase.

    Args:
        training_set_iterator: Yields (uint8 image batch, conditioning) pairs.
        z_dim: Dimensionality of the generator latent space.
        device: Device tensors are moved to.
        batches_num: Number of training phases; one latent batch is drawn per phase.
        batch_size: Per-iteration batch size.
        batch_gpu: Sub-batch size for gradient accumulation.

    Returns:
        (real_img, real_cs, gen_zs) — the original annotation declared a
        2-tuple, but three values are returned:
        real_img: real images scaled to [-1, 1], chunked into batch_gpu pieces.
        real_cs: conditioning, chunked the same way.
        gen_zs: per-phase latent batches, each chunked into batch_gpu pieces.
        NOTE(review): `split` is defined elsewhere in this module; it
        presumably chunks its input into batch_gpu-sized pieces — confirm.
    """
    # Get data and sample latents.
    real_img, real_cs = next(training_set_iterator)
    # uint8 [0, 255] -> float32 [-1, 1].
    real_img = real_img.to(device).to(torch.float32) / 127.5 - 1
    gen_zs = torch.randn([batches_num * batch_size, z_dim], device=device)
    # Split for phases / gradient accumulation.
    real_img = split(real_img, batch_gpu)
    gen_zs = [split(gen_z, batch_gpu) for gen_z in split(gen_zs, batch_size)]
    real_cs = split(real_cs, batch_gpu)
    return real_img, real_cs, gen_zs
def partial_freeze(phase: dnnlib.EasyDict) -> None:
    """Freeze the parts of the phase's module that must stay fixed in training."""
    if phase.name == 'D':
        # Discriminator: keep its pretrained DINO feature extractor frozen.
        phase.module.dino.requires_grad_(False)
    elif phase.name == 'G':
        # Generator: train only the layers whose names match `trainable_layers`.
        allowed = phase.module.trainable_layers
        phase.module.requires_grad_(False)
        for name, layer in phase.module.named_modules():
            layer.requires_grad_(any(tag in name for tag in allowed))
def wds_dataloader(
    train_data: list[str],
    *,
    batch_size: int,
    resolution: int,
    workers: int = 3,
    shard_shuffle_size: int = 1000,
    sample_shuffle_size: int = 10000,
) -> wds.WebLoader:
    """Build a WebDataset pipeline + loader over the given shard URLs.

    Shards are resampled forever, expanded to samples, decoded to RGB PIL
    images, filtered to captioned samples, shuffled at shard and sample level,
    preprocessed to (image, text) tuples, and batched inside the pipeline.
    """
    assert train_data is not None
    stages = [
        wds.ResampledShards(train_data),
        tarfile_to_samples_nothrow,
        wds.shuffle(shard_shuffle_size),
        wds.select(filter_no_caption),
        wds.decode("pilrgb", handler=log_and_continue),
        wds.shuffle(sample_shuffle_size),
        wds.rename(image="jpg;png", text="txt"),
        wds.map_dict(image=partial(preprocess_img, resolution=resolution), text=preprocess_txt),
        wds.to_tuple("image", "text"),
        wds.batched(batch_size),
    ]
    dataset = wds.DataPipeline(stages)
    # Batching happens inside the pipeline, so the loader itself must not batch.
    return wds.WebLoader(
        dataset,
        batch_size=None,
        shuffle=False,
        num_workers=workers,
    )
def training_loop(
    run_dir = '.', # Output directory.
    training_set_kwargs = {}, # Options for training set.
    data_loader_kwargs = {}, # Options for torch.utils.data.DataLoader.
    G_kwargs = {}, # Options for generator network.
    D_kwargs = {}, # Options for discriminator network.
    G_opt_kwargs = {}, # Options for generator optimizer.
    D_opt_kwargs = {}, # Options for discriminator optimizer.
    loss_kwargs = {}, # Options for loss function.
    metrics = [], # Metrics to evaluate during training.
    random_seed = 0, # Global random seed.
    batch_size = 4, # Total batch size for one training iteration. Can be larger than batch_gpu * num_gpus.
    batch_gpu = 4, # Number of samples processed at a time by one GPU.
    ema_kimg = 10, # Half-life of the exponential moving average (EMA) of generator weights.
    ema_rampup = 0.05, # EMA ramp-up coefficient. None = no rampup.
    total_kimg = 25000, # Total length of the training, measured in thousands of real images.
    kimg_per_tick = 4, # Progress snapshot interval.
    image_snapshot_ticks = 50, # How often to save image snapshots? None = disable.
    network_snapshot_ticks = 50, # How often to save network snapshots? None = disable.
    resume_pkl = None, # Network pickle to resume training from.
    resume_kimg = 0, # First kimg to report when resuming training.
    cudnn_benchmark = True, # Enable torch.backends.cudnn.benchmark?
    abort_fn = None, # Callback function for determining whether to abort training. Must return consistent results across ranks.
    progress_fn = None, # Callback function for updating training progress. Called for all ranks.
    device = torch.device('cuda'),
) -> None:
    """Top-level GAN training loop: alternates D/G phases, maintains G_ema,
    and periodically prints status, saves image/network snapshots, and
    evaluates metrics.

    Runs once per rank under torch.distributed; rank 0 owns logging, snapshot
    files, phase timing, and metric reporting. All arguments are documented
    inline in the signature above.

    NOTE(review): the mutable default arguments ({} / []) are never mutated
    here, but None-defaults would be safer — confirm before changing, since
    the dnnlib launcher may introspect this signature.
    """
    # Initialize.
    start_time = time.time()
    np.random.seed(random_seed * dist.get_world_size() + dist.get_rank())
    torch.manual_seed(random_seed * dist.get_world_size() + dist.get_rank())
    torch.backends.cudnn.benchmark = cudnn_benchmark # Improves training speed.
    torch.backends.cuda.matmul.allow_tf32 = False # Improves numerical accuracy.
    torch.backends.cudnn.allow_tf32 = False # Improves numerical accuracy.
    conv2d_gradfix.enabled = True # Improves training speed.
    # Select batch size per GPU.
    batch_gpu_total = batch_size // dist.get_world_size()
    if batch_gpu is None or batch_gpu > batch_gpu_total:
        batch_gpu = batch_gpu_total
    n_batch_acc = batch_gpu_total // batch_gpu # Gradient-accumulation steps per iteration.
    assert batch_size == batch_gpu * n_batch_acc * dist.get_world_size()
    # Load training set. Choose between WDS and zip dataloader.
    dist.print0('Loading training set...')
    training_set = dnnlib.util.construct_class_by_name(**training_set_kwargs)
    if training_set_kwargs.class_name == 'training.data_wds.WdsWrapper':
        training_set_iterator = iter(wds_dataloader(training_set.urls, resolution=training_set.resolution, batch_size=batch_size//dist.get_world_size()))
    else:
        training_set_sampler = misc.InfiniteSampler(dataset=training_set, rank=dist.get_rank(), num_replicas=dist.get_world_size(), seed=random_seed)
        training_set_iterator = iter(torch.utils.data.DataLoader(dataset=training_set, sampler=training_set_sampler, batch_size=batch_size//dist.get_world_size(), **data_loader_kwargs))
    dist.print0('Num images: ', len(training_set))
    dist.print0('Image shape:', training_set.image_shape)
    dist.print0('Label shape:', training_set.label_shape)
    dist.print0()
    # Construct networks.
    dist.print0('Constructing networks...')
    G = dnnlib.util.construct_class_by_name(conditional=(training_set.label_dim>0), **G_kwargs).train().requires_grad_(False).to(device)
    G_ema = copy.deepcopy(G).eval()
    D = dnnlib.util.construct_class_by_name(c_dim=G.c_dim, **D_kwargs).train().requires_grad_(False).to(device)
    # Check for existing checkpoint
    data = {}
    if (resume_pkl is not None) and dist.get_rank() == 0:
        dist.print0(f'Resuming from "{resume_pkl}"')
        with dnnlib.util.open_url(resume_pkl) as f:
            data = dill.load(f)
        misc.copy_params_and_buffers(src_module=data['D'], dst_module=D, require_all=False)
        # NOTE(review): G is restored from data['G_ema'], not data['G'] —
        # presumably intentional (resume from averaged weights); confirm.
        misc.copy_params_and_buffers(src_module=data['G_ema'], dst_module=G, require_all=False)
        misc.copy_params_and_buffers(src_module=data['G_ema'], dst_module=G_ema, require_all=False)
        del data
    # Print network summary tables.
    if dist.get_rank() == 0:
        network_summaries(G, D, device)
    # Distribute across GPUs.
    dist.print0(f'Distributing across {dist.get_world_size()} GPUs...')
    for module in [G, D, G_ema]:
        if module is not None and dist.get_world_size() > 1:
            for param in misc.params_and_buffers(module):
                torch.distributed.broadcast(param, src=0)
    # Setup training phases.
    dist.print0('Setting up training phases...')
    loss = dnnlib.util.construct_class_by_name(device=device, G=G, D=D, **loss_kwargs)
    phases = []
    for name, module, opt_kwargs in [('D', D, D_opt_kwargs), ('G', G, G_opt_kwargs)]:
        opt = dnnlib.util.construct_class_by_name(params=module.parameters(), **opt_kwargs)
        phases += [dnnlib.EasyDict(name=name, module=module, opt=opt, interval=1)]
    for phase in phases:
        phase.start_event = None
        phase.end_event = None
        if dist.get_rank() == 0:
            # CUDA events let rank 0 time each phase without global syncs.
            phase.start_event = torch.cuda.Event(enable_timing=True)
            phase.end_event = torch.cuda.Event(enable_timing=True)
    # Export sample images.
    dist.print0('Exporting sample images...')
    if dist.get_rank() == 0:
        grid_size, images, _ = setup_snapshot_image_grid(training_set)
        save_image_grid(images, drange=[0, 255], grid_size=grid_size, fname=os.path.join(run_dir, "reals.png"))
    # Initialize logs.
    dist.print0('Initializing logs...')
    stats_collector = training_stats.Collector(regex='.*')
    stats_metrics = dict()
    stats_jsonl = None
    stats_tfevents = None
    if dist.get_rank() == 0:
        stats_jsonl = open(os.path.join(run_dir, 'stats.jsonl'), 'wt')
        stats_tfevents = tensorboard.SummaryWriter(run_dir)
    # Train.
    dist.print0(f'Training for {total_kimg} kimg...')
    dist.print0()
    cur_nimg = resume_kimg * 1000
    cur_tick = 0
    tick_start_nimg = cur_nimg
    tick_start_time = time.time()
    maintenance_time = tick_start_time - start_time
    batch_idx = 0
    if progress_fn is not None:
        progress_fn(cur_nimg // 1000, total_kimg)
    while True:
        # Get data
        phase_real_img, phase_real_c, all_gen_z = fetch_data(training_set_iterator, G.z_dim, device, len(phases), batch_size, batch_gpu)
        # Execute training phases.
        for phase, phase_gen_z in zip(phases, all_gen_z):
            if batch_idx % phase.interval != 0:
                continue
            if phase.start_event is not None:
                phase.start_event.record(torch.cuda.current_stream(device))
            # Enable/disable gradients.
            phase.module.requires_grad_(True)
            partial_freeze(phase)
            # Accumulate gradients.
            for real_img, real_c, gen_z in zip(phase_real_img, phase_real_c, phase_gen_z):
                loss.accumulate_gradients(phase=phase.name, real_img=real_img, c_raw=real_c, gen_z=gen_z, cur_nimg=cur_nimg)
            phase.module.requires_grad_(False)
            # Update weights.
            params = [param for param in phase.module.parameters() if param.grad is not None]
            if len(params) > 0:
                # Average grads across ranks / accumulation steps, then step.
                sync_grads(network=phase.module, gain=n_batch_acc)
                phase.opt.step()
            phase.opt.zero_grad(set_to_none=True)
            # Phase done.
            if phase.end_event is not None:
                phase.end_event.record(torch.cuda.current_stream(device))
        # Update G_ema.
        ema_nimg = ema_kimg * 1000
        if ema_rampup is not None:
            ema_nimg = min(ema_nimg, cur_nimg * ema_rampup)
        # Per-step decay such that the EMA half-life equals ema_nimg images.
        ema_beta = 0.5 ** (batch_size / max(ema_nimg, 1e-8))
        for p_ema, p in zip(G_ema.parameters(), G.parameters()):
            p_ema.copy_(p.lerp(p_ema, ema_beta))
        for b_ema, b in zip(G_ema.buffers(), G.buffers()):
            b_ema.copy_(b)
        # Update state.
        cur_nimg += batch_size
        batch_idx += 1
        # Perform maintenance tasks once per tick.
        done = (cur_nimg >= total_kimg * 1000)
        if (not done) and (cur_tick != 0) and (cur_nimg < tick_start_nimg + kimg_per_tick * 1000):
            continue
        # Print status line, accumulating the same information in training_stats.
        tick_end_time = time.time()
        fields = []
        fields += [f"tick {training_stats.report0('Progress/tick', cur_tick):<5d}"]
        fields += [f"kimg {training_stats.report0('Progress/kimg', cur_nimg / 1e3):<8.1f}"]
        fields += [f"time {dnnlib.util.format_time(training_stats.report0('Timing/total_sec', tick_end_time - start_time)):<12s}"]
        fields += [f"sec/tick {training_stats.report0('Timing/sec_per_tick', tick_end_time - tick_start_time):<7.1f}"]
        fields += [f"sec/kimg {training_stats.report0('Timing/sec_per_kimg', (tick_end_time - tick_start_time) / (cur_nimg - tick_start_nimg) * 1e3):<7.2f}"]
        fields += [f"maintenance {training_stats.report0('Timing/maintenance_sec', maintenance_time):<6.1f}"]
        fields += [f"cpumem {training_stats.report0('Resources/cpu_mem_gb', psutil.Process(os.getpid()).memory_info().rss / 2**30):<6.2f}"]
        fields += [f"gpumem {training_stats.report0('Resources/peak_gpu_mem_gb', torch.cuda.max_memory_allocated(device) / 2**30):<6.2f}"]
        fields += [f"reserved {training_stats.report0('Resources/peak_gpu_mem_reserved_gb', torch.cuda.max_memory_reserved(device) / 2**30):<6.2f}"]
        torch.cuda.reset_peak_memory_stats()
        training_stats.report0('Timing/total_hours', (tick_end_time - start_time) / (60 * 60))
        training_stats.report0('Timing/total_days', (tick_end_time - start_time) / (24 * 60 * 60))
        dist.print0(' '.join(fields))
        # Check for abort.
        if (not done) and (abort_fn is not None) and abort_fn():
            done = True
            dist.print0()
            dist.print0('Aborting...')
        # Save image snapshot.
        if (dist.get_rank() == 0) and (image_snapshot_ticks is not None) and (done or cur_tick % image_snapshot_ticks == 0):
            save_samples(G_ema, run_dir=run_dir, device=device, suffix=f'{cur_nimg//1000:06d}')
        # Save network snapshot.
        data = None
        if (network_snapshot_ticks is not None) and (done or cur_tick % network_snapshot_ticks == 0):
            data = dict(G=G, D=D, G_ema=G_ema, training_set_kwargs=dict(training_set_kwargs))
            for key, value in data.items():
                if isinstance(value, torch.nn.Module):
                    value = copy.deepcopy(value).eval().requires_grad_(False)
                    misc.check_ddp_consistency(value, ignore_regex=r'.*\.[^.]+_(avg|ema)')
                    for param in misc.params_and_buffers(value):
                        torch.distributed.broadcast(param, src=0)
                    data[key] = value.cpu()
                del value # conserve memory
            if dist.get_rank() == 0:
                snapshot_pkl = os.path.join(run_dir, f'network-snapshot-{cur_nimg//1000:06d}.pkl')
                with open(snapshot_pkl, 'wb') as f:
                    dill.dump(data, f)
        # Evaluate metrics.
        if cur_tick and (data is not None) and (len(metrics) > 0):
            dist.print0('Evaluating metrics...')
            for metric in metrics:
                result_dict = metric_main.calc_metric(metric=metric, G=data['G_ema'],
                    dataset_kwargs=training_set_kwargs, num_gpus=dist.get_world_size(), rank=dist.get_rank(), device=device)
                if dist.get_rank() == 0:
                    metric_main.report_metric(result_dict, run_dir=run_dir, snapshot_pkl=snapshot_pkl)
                stats_metrics.update(result_dict.results)
        del data # conserve memory
        # Collect statistics.
        for phase in phases:
            value = []
            if (phase.start_event is not None) and (phase.end_event is not None):
                phase.end_event.synchronize()
                value = phase.start_event.elapsed_time(phase.end_event)
            training_stats.report0('Timing/' + phase.name, value)
        stats_collector.update()
        stats_dict = stats_collector.as_dict()
        # Update logs.
        timestamp = time.time()
        if stats_jsonl is not None:
            fields = dict(stats_dict, timestamp=timestamp)
            stats_jsonl.write(json.dumps(fields) + '\n')
            stats_jsonl.flush()
        if stats_tfevents is not None:
            global_step = int(cur_nimg / 1e3)
            walltime = timestamp - start_time
            for name, value in stats_dict.items():
                stats_tfevents.add_scalar(name, value.mean, global_step=global_step, walltime=walltime)
            for name, value in stats_metrics.items():
                stats_tfevents.add_scalar(f'Metrics/{name}', value, global_step=global_step, walltime=walltime)
            stats_tfevents.flush()
        if progress_fn is not None:
            progress_fn(cur_nimg // 1000, total_kimg)
        # Update state.
        cur_tick += 1
        tick_start_nimg = cur_nimg
        tick_start_time = time.time()
        maintenance_time = tick_start_time - tick_end_time
        if done:
            break
    # Done.
    dist.print0()
    dist.print0('Exiting...')
180,957 | import types
import math
from typing import Callable
import torch
import torch.nn as nn
import torch.nn.functional as F
def forward_flex(self, x: torch.Tensor) -> torch.Tensor:
    """ViT forward pass that tolerates arbitrary input resolutions by
    resizing the position embedding to the current patch grid.

    Injected into a VisionTransformer instance via types.MethodType
    (see make_vit_backbone).
    """
    _, _, height, width = x.size()
    # Patchify: conv projection, then flatten the spatial grid into tokens.
    tokens = self.patch_embed.proj(x).flatten(2).transpose(1, 2)
    # Interpolate the learned position embedding to the current grid size.
    pos_embed = self._resize_pos_embed(
        self.pos_embed, height // self.patch_size[1], width // self.patch_size[0]
    )
    # Prepend the class token.
    cls_tok = self.cls_token.expand(tokens.size(0), -1, -1)
    tokens = torch.cat((cls_tok, tokens), dim=1)
    # Standard transformer encoder.
    tokens = tokens + pos_embed
    tokens = self.pos_drop(tokens)
    for block in self.blocks:
        tokens = block(tokens)
    return self.norm(tokens)
# Intermediate ViT activations captured during a forward pass.
# NOTE(review): presumably written by the get_activation() hook closures
# registered in make_vit_backbone (keys '0'..'4'); read back in forward_vit —
# confirm against get_activation's definition.
activations = {}
def forward_vit(pretrained: nn.Module, x: torch.Tensor) -> dict:
    """Run the hooked ViT on x and return the hook-captured activations,
    post-processed by the readout/transpose module."""
    # The return value is discarded; we only need the hooks to fire.
    pretrained.model.forward_flex(x)
    post = pretrained.rearrange
    return {name: post(act) for name, act in activations.items()}
180,958 | import types
import math
from typing import Callable
import torch
import torch.nn as nn
import torch.nn.functional as F
class AddReadout(nn.Module):
def __init__(self, start_index: bool = 1):
def forward(self, x: torch.Tensor) -> torch.Tensor:
class Transpose(nn.Module):
def __init__(self, dim0: int, dim1: int):
def forward(self, x: torch.Tensor) -> torch.Tensor:
def _resize_pos_embed(self, posemb: torch.Tensor, gs_h: int, gs_w: int) -> torch.Tensor:
def forward_flex(self, x: torch.Tensor) -> torch.Tensor:
def get_activation(name: str) -> Callable:
def make_vit_backbone(
    model: nn.Module,
    patch_size: list[int] = [16, 16],
    hooks: list[int] = [2, 5, 8, 11],
    hook_patch: bool = True,
    start_index: int = 1,
):
    """Wrap a VisionTransformer so intermediate activations can be tapped.

    Args:
        model: timm-style VisionTransformer instance.
        patch_size: [w, h] patch size of the model (default never mutated,
            so the shared list default is safe).
        hooks: Indices of the four transformer blocks to hook.
        hook_patch: Also capture the patch embedding (post pos-dropout) as '4'.
        start_index: Index of the first patch token (after the readout/class
            token). The original annotation said list[int], but the value and
            its uses are a plain int — fixed.

    Returns:
        nn.Module with .model (hooked ViT, patched with forward_flex /
        _resize_pos_embed) and .rearrange (readout post-processing).
    """
    assert len(hooks) == 4
    pretrained = nn.Module()
    pretrained.model = model
    # Capture the output of the four selected transformer blocks ('0'..'3').
    for i, hook in enumerate(hooks):
        pretrained.model.blocks[hook].register_forward_hook(get_activation(str(i)))
    if hook_patch:
        # Also capture the patch embedding (post position-dropout) as '4'.
        pretrained.model.pos_drop.register_forward_hook(get_activation('4'))
    # Merge the readout token into the patch tokens and move channels to dim 1.
    pretrained.rearrange = nn.Sequential(AddReadout(start_index), Transpose(1, 2))
    pretrained.model.start_index = start_index
    pretrained.model.patch_size = patch_size
    # We inject these methods into the VisionTransformer instance so that
    # we can use it with interpolated position embeddings without modifying
    # the library source.
    pretrained.model.forward_flex = types.MethodType(forward_flex, pretrained.model)
    pretrained.model._resize_pos_embed = types.MethodType(
        _resize_pos_embed, pretrained.model
    )
    return pretrained
180,959 | from typing import Union, Any, Optional
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.parameter import Parameter
from torch_utils import misc
from torch_utils.ops import upfirdn2d, conv2d_resample, bias_act, fma
from networks.shared import FullyConnectedLayer, MLP
from networks.clip import CLIP
def is_list_of_strings(arr: Any) -> bool:
    """Return True if `arr` is a non-empty list/tuple/ndarray whose first
    element is a string.

    Only the first element is inspected (cheap "is this a batch of text
    labels?" probe, matching the original contract).

    Args:
        arr: Any value; None is allowed.

    Returns:
        True for a non-empty list/tuple/np.ndarray with a str first element,
        False otherwise — including None, empty sequences, and non-sequence
        inputs, which previously raised (`arr[0]` was evaluated eagerly
        before the container-type check could short-circuit).
    """
    if arr is None:
        return False
    # Check container type and emptiness BEFORE touching arr[0].
    if not isinstance(arr, (list, np.ndarray, tuple)):
        return False
    if len(arr) == 0:
        return False
    return isinstance(arr[0], str)
180,960 | from typing import Union, Any, Optional
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.parameter import Parameter
from torch_utils import misc
from torch_utils.ops import upfirdn2d, conv2d_resample, bias_act, fma
from networks.shared import FullyConnectedLayer, MLP
from networks.clip import CLIP
def normalize_2nd_moment(x: torch.Tensor, dim: int = 1, eps: float = 1e-8) -> torch.Tensor:
    """Scale x so that its second moment along `dim` is 1 (eps avoids div-by-zero)."""
    second_moment = x.square().mean(dim=dim, keepdim=True)
    return x * (second_moment + eps).rsqrt()
180,961 | from typing import Union, Any, Optional
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.parameter import Parameter
from torch_utils import misc
from torch_utils.ops import upfirdn2d, conv2d_resample, bias_act, fma
from networks.shared import FullyConnectedLayer, MLP
from networks.clip import CLIP
def conv2d_resample(x, w, f=None, up=1, down=1, padding=0, groups=1, flip_weight=True, flip_filter=False):
    r"""2D convolution with optional up/downsampling.
    Padding is performed only once at the beginning, not between the operations.
    Args:
        x: Input tensor of shape
            `[batch_size, in_channels, in_height, in_width]`.
        w: Weight tensor of shape
            `[out_channels, in_channels//groups, kernel_height, kernel_width]`.
        f: Low-pass filter for up/downsampling. Must be prepared beforehand by
            calling upfirdn2d.setup_filter(). None = identity (default).
        up: Integer upsampling factor (default: 1).
        down: Integer downsampling factor (default: 1).
        padding: Padding with respect to the upsampled image. Can be a single number
            or a list/tuple `[x, y]` or `[x_before, x_after, y_before, y_after]`
            (default: 0).
        groups: Split input channels into N groups (default: 1).
        flip_weight: False = convolution, True = correlation (default: True).
        flip_filter: False = convolution, True = correlation (default: False).
    Returns:
        Tensor of the shape `[batch_size, num_channels, out_height, out_width]`.
    """
    # Validate arguments.
    assert isinstance(x, torch.Tensor) and (x.ndim == 4)
    assert isinstance(w, torch.Tensor) and (w.ndim == 4) and (w.dtype == x.dtype)
    assert f is None or (isinstance(f, torch.Tensor) and f.ndim in [1, 2] and f.dtype == torch.float32)
    assert isinstance(up, int) and (up >= 1)
    assert isinstance(down, int) and (down >= 1)
    assert isinstance(groups, int) and (groups >= 1)
    out_channels, in_channels_per_group, kh, kw = _get_weight_shape(w)
    fw, fh = _get_filter_size(f)
    px0, px1, py0, py1 = _parse_padding(padding)
    # Adjust padding to account for up/downsampling.
    if up > 1:
        px0 += (fw + up - 1) // 2
        px1 += (fw - up) // 2
        py0 += (fh + up - 1) // 2
        py1 += (fh - up) // 2
    if down > 1:
        px0 += (fw - down + 1) // 2
        px1 += (fw - down) // 2
        py0 += (fh - down + 1) // 2
        py1 += (fh - down) // 2
    # Fast path: 1x1 convolution with downsampling only => downsample first, then convolve.
    if kw == 1 and kh == 1 and (down > 1 and up == 1):
        x = upfirdn2d.upfirdn2d(x=x, f=f, down=down, padding=[px0,px1,py0,py1], flip_filter=flip_filter)
        x = _conv2d_wrapper(x=x, w=w, groups=groups, flip_weight=flip_weight)
        return x
    # Fast path: 1x1 convolution with upsampling only => convolve first, then upsample.
    if kw == 1 and kh == 1 and (up > 1 and down == 1):
        x = _conv2d_wrapper(x=x, w=w, groups=groups, flip_weight=flip_weight)
        x = upfirdn2d.upfirdn2d(x=x, f=f, up=up, padding=[px0,px1,py0,py1], gain=up**2, flip_filter=flip_filter)
        return x
    # Fast path: downsampling only => use strided convolution.
    if down > 1 and up == 1:
        x = upfirdn2d.upfirdn2d(x=x, f=f, padding=[px0,px1,py0,py1], flip_filter=flip_filter)
        x = _conv2d_wrapper(x=x, w=w, stride=down, groups=groups, flip_weight=flip_weight)
        return x
    # Fast path: upsampling with optional downsampling => use transpose strided convolution.
    if up > 1:
        # The transpose=True path needs the weight with its in/out channel
        # axes swapped (per group when grouped).
        if groups == 1:
            w = w.transpose(0, 1)
        else:
            w = w.reshape(groups, out_channels // groups, in_channels_per_group, kh, kw)
            w = w.transpose(1, 2)
            w = w.reshape(groups * in_channels_per_group, out_channels // groups, kh, kw)
        px0 -= kw - 1
        px1 -= kw - up
        py0 -= kh - 1
        py1 -= kh - up
        # The strided transpose conv only takes symmetric non-negative padding;
        # apply as much as possible there and fold the remainder into upfirdn2d.
        pxt = max(min(-px0, -px1), 0)
        pyt = max(min(-py0, -py1), 0)
        x = _conv2d_wrapper(x=x, w=w, stride=up, padding=[pyt,pxt], groups=groups, transpose=True, flip_weight=(not flip_weight))
        x = upfirdn2d.upfirdn2d(x=x, f=f, padding=[px0+pxt,px1+pxt,py0+pyt,py1+pyt], gain=up**2, flip_filter=flip_filter)
        if down > 1:
            x = upfirdn2d.upfirdn2d(x=x, f=f, down=down, flip_filter=flip_filter)
        return x
    # Fast path: no up/downsampling, padding supported by the underlying implementation => use plain conv2d.
    if up == 1 and down == 1:
        if px0 == px1 and py0 == py1 and px0 >= 0 and py0 >= 0:
            return _conv2d_wrapper(x=x, w=w, padding=[py0,px0], groups=groups, flip_weight=flip_weight)
    # Fallback: Generic reference implementation.
    x = upfirdn2d.upfirdn2d(x=x, f=(f if up > 1 else None), up=up, padding=[px0,px1,py0,py1], gain=up**2, flip_filter=flip_filter)
    x = _conv2d_wrapper(x=x, w=w, groups=groups, flip_weight=flip_weight)
    if down > 1:
        x = upfirdn2d.upfirdn2d(x=x, f=f, down=down, flip_filter=flip_filter)
    return x
def fma(a, b, c): # => a * b + c
    """Fused multiply-add `a * b + c` via the _FusedMultiplyAdd autograd op
    (defined elsewhere in this module; presumably for a leaner backward
    graph than separate mul/add — confirm against _FusedMultiplyAdd)."""
    return _FusedMultiplyAdd.apply(a, b, c)
def modulated_conv2d(
    x: torch.Tensor, # Input tensor of shape [batch_size, in_channels, in_height, in_width].
    weight: torch.Tensor, # Weight tensor of shape [out_channels, in_channels, kernel_height, kernel_width].
    styles: torch.Tensor, # Modulation coefficients of shape [batch_size, in_channels].
    noise: Optional[torch.Tensor] = None, # Optional noise tensor to add to the output activations.
    up: int = 1, # Integer upsampling factor.
    down: int = 1, # Integer downsampling factor.
    padding: int = 0, # Padding with respect to the upsampled image.
    resample_filter: Optional[list[int]] = None, # Low-pass filter to apply when resampling activations.
    demodulate: bool = True, # Apply weight demodulation?
    flip_weight: bool = True, # False = convolution, True = correlation (matches torch.nn.functional.conv2d).
    fused_modconv: bool = True, # Perform modulation, convolution, and demodulation as a single fused operation?
) -> torch.Tensor:
    """StyleGAN2-style modulated convolution.

    Scales the shared convolution weight per sample by `styles`
    (modulation), optionally rescales so each output feature map has unit
    norm per sample (demodulation), and runs the convolution either as one
    grouped conv over the whole batch (fused) or as activation scaling
    before/after a regular conv (non-fused).

    Returns a tensor of shape [batch_size, out_channels, out_height, out_width].
    """
    batch_size = x.shape[0]
    out_channels, in_channels, kh, kw = weight.shape
    misc.assert_shape(weight, [out_channels, in_channels, kh, kw]) # [OIkk]
    misc.assert_shape(x, [batch_size, in_channels, None, None]) # [NIHW]
    misc.assert_shape(styles, [batch_size, in_channels]) # [NI]
    # Pre-normalize inputs to avoid FP16 overflow.
    if x.dtype == torch.float16 and demodulate:
        weight = weight * (1 / np.sqrt(in_channels * kh * kw) / weight.norm(float('inf'), dim=[1,2,3], keepdim=True)) # max_Ikk
        styles = styles / styles.norm(float('inf'), dim=1, keepdim=True) # max_I
    # Calculate per-sample weights and demodulation coefficients.
    w = None
    dcoefs = None
    if demodulate or fused_modconv:
        w = weight.unsqueeze(0) # [NOIkk]
        w = w * styles.reshape(batch_size, 1, -1, 1, 1) # [NOIkk]
    if demodulate:
        dcoefs = (w.square().sum(dim=[2,3,4]) + 1e-8).rsqrt() # [NO]
    if demodulate and fused_modconv:
        w = w * dcoefs.reshape(batch_size, -1, 1, 1, 1) # [NOIkk]
    # Execute by scaling the activations before and after the convolution.
    if not fused_modconv:
        x = x * styles.to(x.dtype).reshape(batch_size, -1, 1, 1)
        x = conv2d_resample.conv2d_resample(x=x, w=weight.to(x.dtype), f=resample_filter, up=up, down=down, padding=padding, flip_weight=flip_weight)
        if demodulate and noise is not None:
            # Fused multiply-add keeps demodulation + noise addition in one op.
            x = fma.fma(x, dcoefs.to(x.dtype).reshape(batch_size, -1, 1, 1), noise.to(x.dtype))
        elif demodulate:
            x = x * dcoefs.to(x.dtype).reshape(batch_size, -1, 1, 1)
        elif noise is not None:
            x = x.add_(noise.to(x.dtype))
        return x
    # Execute as one fused op using grouped convolution.
    with misc.suppress_tracer_warnings(): # this value will be treated as a constant
        batch_size = int(batch_size)
    misc.assert_shape(x, [batch_size, in_channels, None, None])
    # Fold the batch into the channel axis and use groups=batch_size so each
    # sample is convolved with its own modulated weight.
    x = x.reshape(1, -1, *x.shape[2:])
    w = w.reshape(-1, in_channels, kh, kw)
    x = conv2d_resample.conv2d_resample(x=x, w=w.to(x.dtype), f=resample_filter, up=up, down=down, padding=padding, groups=batch_size, flip_weight=flip_weight)
    x = x.reshape(batch_size, -1, *x.shape[2:])
    if noise is not None:
        x = x.add_(noise)
    return x
180,962 | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.utils.spectral_norm import SpectralNorm
from torchvision.transforms import RandomCrop, Normalize
import timm
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from torch_utils import misc
from networks.shared import ResidualBlock, FullyConnectedLayer
from networks.vit_utils import make_vit_backbone, forward_vit
from training.diffaug import DiffAugment
class SpectralConv1d(nn.Conv1d):
    """nn.Conv1d whose weight is constrained by spectral normalization.

    Behaves exactly like nn.Conv1d, except SpectralNorm rescales the weight
    by its largest singular value (one power iteration per forward pass).
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Registers 'weight_orig'/'weight_u' and recomputes 'weight' each forward.
        SpectralNorm.apply(self, name='weight', n_power_iterations=1, dim=0, eps=1e-12)
class BatchNormLocal(nn.Module):
    """Batch norm over 'virtual' sub-batches of at most `virtual_bs` samples.

    The batch is split into ceil(B / virtual_bs) groups; statistics are taken
    per (group, channel) over the sub-batch and length dimensions, so
    normalization is independent across groups. Expects (B, C, L) input.
    """
    def __init__(self, num_features: int, affine: bool = True, virtual_bs: int = 8, eps: float = 1e-5):
        super().__init__()
        self.virtual_bs = virtual_bs
        self.eps = eps
        self.affine = affine
        if self.affine:
            # Learnable per-channel scale and shift.
            self.weight = nn.Parameter(torch.ones(num_features))
            self.bias = nn.Parameter(torch.zeros(num_features))

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        orig_shape = x.size()
        # Split the batch into groups of at most virtual_bs samples.
        num_groups = np.ceil(x.size(0) / self.virtual_bs).astype(int)
        x = x.view(num_groups, -1, x.size(-2), x.size(-1))
        # Normalize with stats over the sub-batch and length dims (per group & channel).
        mean = x.mean([1, 3], keepdim=True)
        var = x.var([1, 3], keepdim=True, unbiased=False)
        x = (x - mean) / torch.sqrt(var + self.eps)
        if self.affine:
            # Broadcasts (1, C, 1) against (G, B', C, L), indexing the channel dim.
            x = x * self.weight[None, :, None] + self.bias[None, :, None]
        return x.view(orig_shape)
def make_block(channels: int, kernel_size: int) -> nn.Module:
    """Channel-preserving block: spectral-norm Conv1d (circular 'same'
    padding) -> virtual batch norm -> LeakyReLU(0.2, inplace)."""
    conv = SpectralConv1d(
        channels,
        channels,
        kernel_size=kernel_size,
        padding=kernel_size // 2,
        padding_mode='circular',
    )
    return nn.Sequential(conv, BatchNormLocal(channels), nn.LeakyReLU(0.2, True))
180,964 | import sys
import dill
import io
import inspect
import copy
import uuid
import types
import dnnlib
_import_hooks = []
The provided code snippet includes necessary dependencies for implementing the `import_hook` function. Write a Python function `def import_hook(hook)` to solve the following problem:
r"""Register an import hook that is called whenever a persistent object is being unpickled. A typical use case is to patch the pickled source code to avoid errors and inconsistencies when the API of some imported module has changed. The hook should have the following signature: hook(meta) -> modified meta `meta` is an instance of `dnnlib.EasyDict` with the following fields: type: Type of the persistent object, e.g. `'class'`. version: Internal version number of `torch_utils.persistence`. module_src Original source code of the Python module. class_name: Class name in the original Python module. state: Internal state of the object. Example: @persistence.import_hook def wreck_my_network(meta): if meta.class_name == 'MyNetwork': print('MyNetwork is being imported. I will wreck it!') meta.module_src = meta.module_src.replace("True", "False") return meta
Here is the function:
def import_hook(hook):
    r"""Register *hook* to run whenever a persistent object is unpickled.

    The hook receives a `dnnlib.EasyDict` describing the pickled object
    (fields: `type`, `version`, `module_src`, `class_name`, `state`) and
    must return a (possibly modified) meta dict. The typical use case is
    patching `module_src` so old pickles keep loading after the API of an
    imported module has changed.

    Example:

        @persistence.import_hook
        def wreck_my_network(meta):
            if meta.class_name == 'MyNetwork':
                print('MyNetwork is being imported. I will wreck it!')
                meta.module_src = meta.module_src.replace("True", "False")
            return meta
    """
    assert callable(hook)
    _import_hooks.append(hook)
180,965 | import torch
from pkg_resources import parse_version
def _should_use_custom_op():
class _GridSample2dForward(torch.autograd.Function):
def forward(ctx, input, grid):
def backward(ctx, grad_output):
def grid_sample(input, grid):
    """Bilinear grid-sample, routed through the custom op when it is usable."""
    if not _should_use_custom_op():
        # Fall back to the stock PyTorch implementation.
        return torch.nn.functional.grid_sample(input=input, grid=grid, mode='bilinear', padding_mode='zeros', align_corners=False)
    return _GridSample2dForward.apply(input, grid)
180,972 | import re
import contextlib
import numpy as np
import torch
import warnings
import dnnlib
# Process-wide cache: identical (value, shape, dtype, device, layout) requests
# return the very same tensor object.
_constant_cache = dict()

def constant(value, shape=None, dtype=None, device=None, memory_format=None):
    """Return *value* as a cached constant tensor.

    Defaults: the global default dtype, CPU device, contiguous layout.
    When *shape* is given, the value is broadcast to that shape.
    """
    arr = np.asarray(value)
    shape = None if shape is None else tuple(shape)
    dtype = torch.get_default_dtype() if dtype is None else dtype
    device = torch.device('cpu') if device is None else device
    memory_format = torch.contiguous_format if memory_format is None else memory_format

    cache_key = (arr.shape, arr.dtype, arr.tobytes(), shape, dtype, device, memory_format)
    cached = _constant_cache.get(cache_key, None)
    if cached is None:
        cached = torch.as_tensor(arr.copy(), dtype=dtype, device=device)
        if shape is not None:
            cached, _ = torch.broadcast_tensors(cached, torch.empty(shape))
        cached = cached.contiguous(memory_format=memory_format)
        _constant_cache[cache_key] = cached
    return cached
180,974 | import re
import contextlib
import numpy as np
import torch
import warnings
import dnnlib
def get_children(model: torch.nn.Module):
    """Recursively collect the leaf sub-modules of *model*.

    Returns a flat list of leaf modules — except when *model* itself is a
    leaf, in which case the module (not a list) is returned; the recursion
    below relies on that asymmetry.
    """
    direct_children = list(model.children())
    if not direct_children:
        return model
    leaves = []
    for child in direct_children:
        try:
            # A list result is spliced in; a bare leaf module is not
            # iterable, so extend() raises TypeError.
            leaves.extend(get_children(child))
        except TypeError:
            leaves.append(get_children(child))
    return leaves
def spectral_to_cpu(model: torch.nn.Module):
    """Move the weights of all spectral-norm-wrapped leaf modules to the CPU.

    A module counts as wrapped when it carries a `weight_v` attribute (added
    by torch's spectral_norm). Returns *model* for chaining.
    """
    for leaf in get_children(model):
        if hasattr(leaf, 'weight_v'):
            leaf.weight = leaf.weight.cpu()
    return model
180,976 | import os
import torch
from . import training_stats
def should_stop():
    """Hook asking whether training should terminate early; this default never does."""
    return False
180,977 | import os
import torch
from . import training_stats
def update_progress(cur, total):
    """No-op progress callback; both arguments are intentionally ignored."""
    del cur, total
180,978 | import functools
import PIL.Image
import gzip
import io
import json
import os
import pickle
import re
import sys
import tarfile
import zipfile
from pathlib import Path
from typing import Callable, Optional, Tuple, Union
import requests
import numpy as np
import click
from tqdm import tqdm
The provided code snippet includes necessary dependencies for implementing the `parse_tuple` function. Write a Python function `def parse_tuple(s: str) -> Tuple[int, int]` to solve the following problem:
Parse a 'M,N' or 'MxN' integer tuple. Example: '4x2' returns (4,2)
Here is the function:
def parse_tuple(s: str) -> Tuple[int, int]:
    """Parse a 'M,N' or 'MxN' integer tuple.

    Example: '4x2' returns (4,2)
    """
    match = re.match(r'^(\d+)[x,](\d+)$', s)
    if match is None:
        raise click.ClickException(f'cannot parse tuple {s}')
    first, second = match.groups()
    return int(first), int(second)
180,979 | import functools
import PIL.Image
import gzip
import io
import json
import os
import pickle
import re
import sys
import tarfile
import zipfile
from pathlib import Path
from typing import Callable, Optional, Tuple, Union
import requests
import numpy as np
import click
from tqdm import tqdm
def make_transform(
    transform: Optional[str],
    output_width: Optional[int],
    output_height: Optional[int]
) -> Callable[[np.ndarray], Optional[np.ndarray]]:
    """Return an image-transform callable for dataset preprocessing.

    Args:
        transform: None (plain resize), 'center-crop', or 'center-crop-wide'.
        output_width / output_height: Target size; both are required for the
            cropping transforms.

    Returns:
        A function mapping an HWC uint8 array to the transformed array, or
        None when 'center-crop-wide' rejects a too-small image.

    Raises:
        click.ClickException: on a missing resolution or an unknown transform.
    """
    def scale(width, height, img):
        # Plain resize; returns the input unchanged when already the right size.
        w = img.shape[1]
        h = img.shape[0]
        if width == w and height == h:
            return img
        img = PIL.Image.fromarray(img)
        ww = width if width is not None else w
        hh = height if height is not None else h
        img = img.resize((ww, hh), PIL.Image.Resampling.LANCZOS)
        return np.array(img)

    def center_crop(width, height, img):
        # Square center crop, then resize to the target resolution.
        crop = np.min(img.shape[:2])
        img = img[(img.shape[0] - crop) // 2 : (img.shape[0] + crop) // 2, (img.shape[1] - crop) // 2 : (img.shape[1] + crop) // 2]
        if img.ndim == 2:
            img = img[:, :, np.newaxis].repeat(3, axis=2)  # grayscale -> RGB
        img = PIL.Image.fromarray(img, 'RGB')
        img = img.resize((width, height), PIL.Image.Resampling.LANCZOS)
        return np.array(img)

    def center_crop_wide(width, height, img):
        # Wide crop, letterboxed onto a width x width black canvas.
        ch = int(np.round(width * img.shape[0] / img.shape[1]))
        if img.shape[1] < width or ch < height:
            return None  # image too small for this transform
        img = img[(img.shape[0] - ch) // 2 : (img.shape[0] + ch) // 2]
        if img.ndim == 2:
            img = img[:, :, np.newaxis].repeat(3, axis=2)
        img = PIL.Image.fromarray(img, 'RGB')
        img = img.resize((width, height), PIL.Image.Resampling.LANCZOS)
        img = np.array(img)
        canvas = np.zeros([width, width, 3], dtype=np.uint8)
        canvas[(width - height) // 2 : (width + height) // 2, :] = img
        return canvas

    if transform is None:
        return functools.partial(scale, output_width, output_height)
    if transform == 'center-crop':
        if output_width is None or output_height is None:
            # BUGFIX: message previously read "...using center-croptransform"
            # (missing space before "transform").
            raise click.ClickException('must specify --resolution=WxH when using ' + transform + ' transform')
        return functools.partial(center_crop, output_width, output_height)
    if transform == 'center-crop-wide':
        if output_width is None or output_height is None:
            raise click.ClickException('must specify --resolution=WxH when using ' + transform + ' transform')
        return functools.partial(center_crop_wide, output_width, output_height)
    # BUGFIX: was `assert False, 'unknown transform'`, which is stripped under
    # `python -O` and raises the wrong exception type for a CLI error.
    raise click.ClickException(f'unknown transform: {transform}')
180,980 | import functools
import PIL.Image
import gzip
import io
import json
import os
import pickle
import re
import sys
import tarfile
import zipfile
from pathlib import Path
from typing import Callable, Optional, Tuple, Union
import requests
import numpy as np
import click
from tqdm import tqdm
def file_ext(name: Union[str, Path]) -> str:
def open_image_folder(source_dir, *, max_images: Optional[int]):
def open_image_zip(source, *, max_images: Optional[int]):
def open_lmdb(lmdb_dir: str, *, max_images: Optional[int]):
def open_coco(source: str, *, max_images: Optional[int]):
def open_cifar10(tarball: str, *, max_images: Optional[int]):
def open_mnist(images_gz: str, *, max_images: Optional[int]):
labels_gz = images_gz.replace('-images-idx3-ubyte.gz', '-labels-idx1-ubyte.gz')
assert labels_gz != images_gz
images = []
labels = []
with gzip.open(images_gz, 'rb') as f:
images = np.frombuffer(f.read(), np.uint8, offset=16)
with gzip.open(labels_gz, 'rb') as f:
labels = np.frombuffer(f.read(), np.uint8, offset=8)
images = images.reshape(-1, 28, 28)
images = np.pad(images, [(0,0), (2,2), (2,2)], 'constant', constant_values=0)
assert images.shape == (60000, 32, 32) and images.dtype == np.uint8
assert labels.shape == (60000,) and labels.dtype == np.uint8
assert np.min(images) == 0 and np.max(images) == 255
assert np.min(labels) == 0 and np.max(labels) == 9
max_idx = maybe_min(len(images), max_images)
def iterate_images():
return max_idx, iterate_images()
def open_dataset(source, *, max_images: Optional[int]):
    """Dispatch *source* to the matching reader based on its path and name.

    Directories go to the LMDB or image-folder readers; known archive file
    names go to the CIFAR-10 / COCO / MNIST / zip readers.
    """
    if os.path.isdir(source):
        if source.rstrip('/').endswith('_lmdb'):
            return open_lmdb(source, max_images=max_images)
        return open_image_folder(source, max_images=max_images)
    if not os.path.isfile(source):
        raise click.ClickException(f'Missing input file or directory: {source}')
    basename = os.path.basename(source)
    if basename == 'cifar-10-python.tar.gz':
        return open_cifar10(source, max_images=max_images)
    if basename == 'captions_val2014.json':
        return open_coco(source, max_images=max_images)
    if basename == 'train-images-idx3-ubyte.gz':
        return open_mnist(source, max_images=max_images)
    if file_ext(source) == 'zip':
        return open_image_zip(source, max_images=max_images)
    assert False, 'unknown archive type'
180,981 | import functools
import PIL.Image
import gzip
import io
import json
import os
import pickle
import re
import sys
import tarfile
import zipfile
from pathlib import Path
from typing import Callable, Optional, Tuple, Union
import requests
import numpy as np
import click
from tqdm import tqdm
def file_ext(name: Union[str, Path]) -> str:
def open_dest(dest: str) -> Tuple[str, Callable[[str, Union[bytes, str]], None], Callable[[], None]]:
    """Prepare the output target and return (root_prefix, write_fn, close_fn).

    A '*.zip' destination yields a writer into a fresh uncompressed archive;
    anything else is treated as a folder, which must be empty if it exists.
    """
    if file_ext(dest) == 'zip':
        parent = os.path.dirname(dest)
        if parent != '':
            os.makedirs(parent, exist_ok=True)
        archive = zipfile.ZipFile(file=dest, mode='w', compression=zipfile.ZIP_STORED)
        def write_to_zip(fname: str, data: Union[bytes, str]):
            archive.writestr(fname, data)
        return '', write_to_zip, archive.close

    # Folder output. Refuse to write into a pre-existing non-empty directory.
    # Creating the directory up front is not strictly necessary (the writer
    # also mkdirs), but it surfaces permission problems early.
    if os.path.isdir(dest) and len(os.listdir(dest)) != 0:
        raise click.ClickException('--dest folder must be empty')
    os.makedirs(dest, exist_ok=True)

    def write_to_folder(fname: str, data: Union[bytes, str]):
        os.makedirs(os.path.dirname(fname), exist_ok=True)
        payload = data.encode('utf8') if isinstance(data, str) else data
        with open(fname, 'wb') as fout:
            fout.write(payload)
    return dest, write_to_folder, lambda: None
180,982 | import tensorflow as tf
from absl import app, flags, logging
from absl.flags import FLAGS
import numpy as np
import cv2
from core.yolov4 import YOLOv4, YOLOv3, YOLOv3_tiny, decode
import core.utils as utils
import os
from core.config import cfg
def representative_data_gen():
    """Yield up to 10 preprocessed calibration images for int8 TFLite quantization.

    Reads image paths from the file at FLAGS.dataset (whitespace separated),
    skips missing files, and yields each image as a [1, H, W, C] float32 batch.
    """
    # BUGFIX: close the dataset list file deterministically (was an anonymous
    # open().read() that leaked the handle until GC).
    with open(FLAGS.dataset) as f:
        image_paths = f.read().split()
    # BUGFIX: bound the loop by the list length to avoid IndexError when the
    # dataset file lists fewer than 10 images.
    for idx in range(min(10, len(image_paths))):
        if not os.path.exists(image_paths[idx]):
            continue
        original_image = cv2.imread(image_paths[idx])
        original_image = cv2.cvtColor(original_image, cv2.COLOR_BGR2RGB)
        image_data = utils.image_preprocess(np.copy(original_image), [FLAGS.input_size, FLAGS.input_size])
        img_in = image_data[np.newaxis, ...].astype(np.float32)
        print("calibration image {}".format(image_paths[idx]))
        yield [img_in]
def save_tflite():
    """Convert the SavedModel at FLAGS.weights to a TFLite model at FLAGS.output.

    Applies float16 or int8 post-training quantization depending on
    FLAGS.quantize_mode; int8 mode uses representative_data_gen for calibration.
    """
    converter = tf.lite.TFLiteConverter.from_saved_model(FLAGS.weights)
    if FLAGS.quantize_mode == 'float16':
        converter.optimizations = [tf.lite.Optimize.DEFAULT]
        converter.target_spec.supported_types = [tf.compat.v1.lite.constants.FLOAT16]
        converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS]
        converter.allow_custom_ops = True
    elif FLAGS.quantize_mode == 'int8':
        # NOTE(review): the second supported_ops assignment below overwrites
        # the TFLITE_BUILTINS_INT8 restriction set on the line before it.
        # Kept as-is to preserve the existing conversion behavior — confirm
        # whether full-integer quantization was actually intended.
        converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
        converter.optimizations = [tf.lite.Optimize.DEFAULT]
        converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS]
        converter.allow_custom_ops = True
        converter.representative_dataset = representative_data_gen
    tflite_model = converter.convert()
    # BUGFIX: close the output file deterministically (was an anonymous
    # open().write() that relied on garbage collection to flush/close).
    with open(FLAGS.output, 'wb') as f:
        f.write(tflite_model)
    logging.info("model saved to: {}".format(FLAGS.output))
180,983 | import tensorflow as tf
from absl import app, flags, logging
from absl.flags import FLAGS
import numpy as np
import cv2
from core.yolov4 import YOLOv4, YOLOv3, YOLOv3_tiny, decode
import core.utils as utils
import os
from core.config import cfg
def demo():
    """Smoke-test the exported TFLite model by invoking it on random input."""
    interpreter = tf.lite.Interpreter(model_path=FLAGS.output)
    interpreter.allocate_tensors()
    logging.info('tflite model loaded')

    input_details = interpreter.get_input_details()
    print(input_details)
    output_details = interpreter.get_output_details()
    print(output_details)

    # Feed a random float32 tensor matching the model's input shape.
    rand_input = np.array(np.random.random_sample(input_details[0]['shape']), dtype=np.float32)
    interpreter.set_tensor(input_details[0]['index'], rand_input)
    interpreter.invoke()

    outputs = [interpreter.get_tensor(detail['index']) for detail in output_details]
    print(outputs)
180,984 | import sys
import os
import glob
import argparse
The provided code snippet includes necessary dependencies for implementing the `query_yes_no` function. Write a Python function `def query_yes_no(question, default="yes", bypass=False)` to solve the following problem:
Ask a yes/no question via raw_input() and return their answer. "question" is a string that is presented to the user. "default" is the presumed answer if the user just hits <Enter>. It must be "yes" (the default), "no" or None (meaning an answer is required of the user). The "answer" return value is True for "yes" or False for "no".
Here is the function:
def query_yes_no(question, default="yes", bypass=False):
    """Ask a yes/no question on stdin and return the answer as a bool.

    Args:
        question: String presented to the user.
        default: Presumed answer if the user just hits <Enter>; one of
            "yes" (the default), "no", or None (an explicit answer required).
        bypass: If True, print the question and immediately return the
            default answer without reading input (non-interactive mode).

    Returns:
        True for "yes", False for "no".

    Raises:
        ValueError: if *default* is not "yes", "no", or None.
    """
    valid = {"yes": True, "y": True, "ye": True,
             "no": False, "n": False}
    if default is None:
        prompt = " [y/n] "
    elif default == "yes":
        prompt = " [Y/n] "
    elif default == "no":
        prompt = " [y/N] "
    else:
        raise ValueError("invalid default answer: '%s'" % default)
    while True:
        sys.stdout.write(question + prompt)
        if bypass:
            # BUGFIX: this used to `break` out of the loop and implicitly
            # return None (falsy), so bypass mode always behaved like "no".
            # Bypass now answers with the default ("yes" when no default).
            return valid[default] if default is not None else True
        if sys.version_info[0] == 3:
            choice = input().lower()  # Python 3
        else:
            choice = raw_input().lower()  # Python 2 fallback
        if default is not None and choice == '':
            return valid[default]
        elif choice in valid:
            return valid[choice]
        else:
            sys.stdout.write("Please respond with 'yes' or 'no' "
                             "(or 'y' or 'n').\n")
180,985 | import sys
import os
import glob
import argparse
with open('../../data/classes/coco.names') as f:
for line in f:
current_class_name = line.rstrip("\n")
new_class_name = line.replace(' ', args.delimiter).rstrip("\n")
if current_class_name == new_class_name:
continue
y_n_message = ("Are you sure you want "
"to rename the class "
"\"" + current_class_name + "\" "
"into \"" + new_class_name + "\"?"
)
if query_yes_no(y_n_message, bypass=args.yes):
os.chdir("../ground-truth")
rename_class(current_class_name, new_class_name)
os.chdir("../predicted")
rename_class(current_class_name, new_class_name)
def rename_class(current_class_name, new_class_name):
    """Rewrite every *.txt file in the current directory, replacing all
    occurrences of *current_class_name* with *new_class_name*.

    Files that do not mention the old name are left untouched.
    """
    for txt_file in sorted(glob.glob('*.txt')):
        # Load the file as a list of whitespace-stripped lines.
        with open(txt_file) as f:
            lines = [raw.strip() for raw in f.readlines()]
        rewritten = []
        changed = False
        for line in lines:
            if current_class_name in line:
                changed = True
                line = line.replace(current_class_name, new_class_name)
            rewritten.append(line)
        if changed:
            with open(txt_file, 'w') as out:
                for line in rewritten:
                    out.write("%s\n" % line)
180,986 | import sys
import os
import glob
os.chdir(path_to_gt)
os.chdir(path_to_pred)
print('total ground-truth files:', len(gt_files))
print('total predicted files:', len(pred_files))
print()
print('total intersected files:', len(intersection))
print("Intersection completed!")
def backup(src_folder, backup_files, backup_folder):
    """Move *backup_files* (names relative to *src_folder*) into *backup_folder*.

    Note: chdirs into *src_folder* as a side effect, matching the sibling
    helpers in this script. Does nothing (beyond a notice) when the list
    is empty.
    """
    if not backup_files:
        print('No backup required for', src_folder)
        return
    os.chdir(src_folder)
    # Create the backup directory lazily on first use.
    if not os.path.exists(backup_folder):
        os.makedirs(backup_folder)
    for name in backup_files:
        os.rename(name, backup_folder + '/' + name)
180,987 | import glob
import json
import os
import shutil
import operator
import sys
import argparse
from absl import app, flags, logging
from absl.flags import FLAGS
def error(msg):
    """Print *msg* and terminate the process (exit status 0, as the script expects)."""
    print(msg)
    sys.exit(0)
180,988 | import glob
import json
import os
import shutil
import operator
import sys
import argparse
from absl import app, flags, logging
from absl.flags import FLAGS
def is_float_between_0_and_1(value):
    """Return True iff *value* parses as a float strictly inside (0, 1)."""
    try:
        number = float(value)
    except ValueError:
        # Not a parseable number (e.g. arbitrary text).
        return False
    return 0.0 < number < 1.0
180,989 | import glob
import json
import os
import shutil
import operator
import sys
import argparse
from absl import app, flags, logging
from absl.flags import FLAGS
if len(ground_truth_files_list) == 0:
error("Error: No ground-truth files found!")
The provided code snippet includes necessary dependencies for implementing the `voc_ap` function. Write a Python function `def voc_ap(rec, prec)` to solve the following problem:
--- Official matlab code VOC2012--- mrec=[0 ; rec ; 1]; mpre=[0 ; prec ; 0]; for i=numel(mpre)-1:-1:1 mpre(i)=max(mpre(i),mpre(i+1)); end i=find(mrec(2:end)~=mrec(1:end-1))+1; ap=sum((mrec(i)-mrec(i-1)).*mpre(i));
Here is the function:
def voc_ap(rec, prec):
    """Compute PASCAL VOC2012-style Average Precision from recall/precision lists.

    Equivalent to the official MATLAB devkit code:
        mrec=[0 ; rec ; 1]; mpre=[0 ; prec ; 0];
        for i=numel(mpre)-1:-1:1, mpre(i)=max(mpre(i),mpre(i+1)); end
        i=find(mrec(2:end)~=mrec(1:end-1))+1;
        ap=sum((mrec(i)-mrec(i-1)).*mpre(i));

    Note: *rec* and *prec* are extended IN PLACE with sentinel endpoints,
    matching the original implementation — callers rely on this when
    plotting the curve afterwards.

    Returns:
        (ap, mrec, mpre): the AP plus the padded recall list and the
        monotonized precision list (both fresh copies).
    """
    rec.insert(0, 0.0)
    rec.append(1.0)
    prec.insert(0, 0.0)
    prec.append(0.0)
    mrec = rec[:]
    mpre = prec[:]

    # Make precision monotonically non-increasing, scanning right to left.
    for idx in reversed(range(len(mpre) - 1)):
        if mpre[idx] < mpre[idx + 1]:
            mpre[idx] = mpre[idx + 1]

    # AP is the area under the curve: sum rectangles wherever recall changes.
    ap = 0.0
    for idx in range(1, len(mrec)):
        if mrec[idx] != mrec[idx - 1]:
            ap += (mrec[idx] - mrec[idx - 1]) * mpre[idx]
    return ap, mrec, mpre
180,990 | import glob
import json
import os
import shutil
import operator
import sys
import argparse
from absl import app, flags, logging
from absl.flags import FLAGS
with open(results_files_path + "/results.txt", 'w') as results_file:
results_file.write("# AP and precision/recall per class\n")
count_true_positives = {}
for class_index, class_name in enumerate(gt_classes):
count_true_positives[class_name] = 0
"""
Load predictions of that class
"""
predictions_file = tmp_files_path + "/" + class_name + "_predictions.json"
predictions_data = json.load(open(predictions_file))
"""
Assign predictions to ground truth objects
"""
nd = len(predictions_data)
tp = [0] * nd # creates an array of zeros of size nd
fp = [0] * nd
for idx, prediction in enumerate(predictions_data):
file_id = prediction["file_id"]
if show_animation:
# find ground truth image
ground_truth_img = glob.glob1(img_path, file_id + ".*")
#tifCounter = len(glob.glob1(myPath,"*.tif"))
if len(ground_truth_img) == 0:
error("Error. Image not found with id: " + file_id)
elif len(ground_truth_img) > 1:
error("Error. Multiple image with id: " + file_id)
else: # found image
#print(img_path + "/" + ground_truth_img[0])
# Load image
img = cv2.imread(img_path + "/" + ground_truth_img[0])
# load image with draws of multiple detections
img_cumulative_path = results_files_path + "/images/" + ground_truth_img[0]
if os.path.isfile(img_cumulative_path):
img_cumulative = cv2.imread(img_cumulative_path)
else:
img_cumulative = img.copy()
# Add bottom border to image
bottom_border = 60
BLACK = [0, 0, 0]
img = cv2.copyMakeBorder(img, 0, bottom_border, 0, 0, cv2.BORDER_CONSTANT, value=BLACK)
# assign prediction to ground truth object if any
# open ground-truth with that file_id
gt_file = tmp_files_path + "/" + file_id + "_ground_truth.json"
ground_truth_data = json.load(open(gt_file))
ovmax = -1
gt_match = -1
# load prediction bounding-box
bb = [ float(x) for x in prediction["bbox"].split() ]
for obj in ground_truth_data:
# look for a class_name match
if obj["class_name"] == class_name:
bbgt = [ float(x) for x in obj["bbox"].split() ]
bi = [max(bb[0],bbgt[0]), max(bb[1],bbgt[1]), min(bb[2],bbgt[2]), min(bb[3],bbgt[3])]
iw = bi[2] - bi[0] + 1
ih = bi[3] - bi[1] + 1
if iw > 0 and ih > 0:
# compute overlap (IoU) = area of intersection / area of union
ua = (bb[2] - bb[0] + 1) * (bb[3] - bb[1] + 1) + (bbgt[2] - bbgt[0]
+ 1) * (bbgt[3] - bbgt[1] + 1) - iw * ih
ov = iw * ih / ua
if ov > ovmax:
ovmax = ov
gt_match = obj
# assign prediction as true positive/don't care/false positive
if show_animation:
status = "NO MATCH FOUND!" # status is only used in the animation
# set minimum overlap
min_overlap = MINOVERLAP
if specific_iou_flagged:
if class_name in specific_iou_classes:
index = specific_iou_classes.index(class_name)
min_overlap = float(iou_list[index])
if ovmax >= min_overlap:
if "difficult" not in gt_match:
if not bool(gt_match["used"]):
# true positive
tp[idx] = 1
gt_match["used"] = True
count_true_positives[class_name] += 1
# update the ".json" file
with open(gt_file, 'w') as f:
f.write(json.dumps(ground_truth_data))
if show_animation:
status = "MATCH!"
else:
# false positive (multiple detection)
fp[idx] = 1
if show_animation:
status = "REPEATED MATCH!"
else:
# false positive
fp[idx] = 1
if ovmax > 0:
status = "INSUFFICIENT OVERLAP"
"""
Draw image to show animation
"""
if show_animation:
height, widht = img.shape[:2]
# colors (OpenCV works with BGR)
white = (255,255,255)
light_blue = (255,200,100)
green = (0,255,0)
light_red = (30,30,255)
# 1st line
margin = 10
v_pos = int(height - margin - (bottom_border / 2))
text = "Image: " + ground_truth_img[0] + " "
img, line_width = draw_text_in_image(img, text, (margin, v_pos), white, 0)
text = "Class [" + str(class_index) + "/" + str(n_classes) + "]: " + class_name + " "
img, line_width = draw_text_in_image(img, text, (margin + line_width, v_pos), light_blue, line_width)
if ovmax != -1:
color = light_red
if status == "INSUFFICIENT OVERLAP":
text = "IoU: {0:.2f}% ".format(ovmax*100) + "< {0:.2f}% ".format(min_overlap*100)
else:
text = "IoU: {0:.2f}% ".format(ovmax*100) + ">= {0:.2f}% ".format(min_overlap*100)
color = green
img, _ = draw_text_in_image(img, text, (margin + line_width, v_pos), color, line_width)
# 2nd line
v_pos += int(bottom_border / 2)
rank_pos = str(idx+1) # rank position (idx starts at 0)
text = "Prediction #rank: " + rank_pos + " confidence: {0:.2f}% ".format(float(prediction["confidence"])*100)
img, line_width = draw_text_in_image(img, text, (margin, v_pos), white, 0)
color = light_red
if status == "MATCH!":
color = green
text = "Result: " + status + " "
img, line_width = draw_text_in_image(img, text, (margin + line_width, v_pos), color, line_width)
font = cv2.FONT_HERSHEY_SIMPLEX
if ovmax > 0: # if there is intersections between the bounding-boxes
bbgt = [ int(x) for x in gt_match["bbox"].split() ]
cv2.rectangle(img,(bbgt[0],bbgt[1]),(bbgt[2],bbgt[3]),light_blue,2)
cv2.rectangle(img_cumulative,(bbgt[0],bbgt[1]),(bbgt[2],bbgt[3]),light_blue,2)
cv2.putText(img_cumulative, class_name, (bbgt[0],bbgt[1] - 5), font, 0.6, light_blue, 1, cv2.LINE_AA)
bb = [int(i) for i in bb]
cv2.rectangle(img,(bb[0],bb[1]),(bb[2],bb[3]),color,2)
cv2.rectangle(img_cumulative,(bb[0],bb[1]),(bb[2],bb[3]),color,2)
cv2.putText(img_cumulative, class_name, (bb[0],bb[1] - 5), font, 0.6, color, 1, cv2.LINE_AA)
# show image
cv2.imshow("Animation", img)
cv2.waitKey(20) # show for 20 ms
# save image to results
output_img_path = results_files_path + "/images/single_predictions/" + class_name + "_prediction" + str(idx) + ".jpg"
cv2.imwrite(output_img_path, img)
# save the image with all the objects drawn to it
cv2.imwrite(img_cumulative_path, img_cumulative)
#print(tp)
# compute precision/recall
cumsum = 0
for idx, val in enumerate(fp):
fp[idx] += cumsum
cumsum += val
cumsum = 0
for idx, val in enumerate(tp):
tp[idx] += cumsum
cumsum += val
#print(tp)
rec = tp[:]
for idx, val in enumerate(tp):
rec[idx] = float(tp[idx]) / gt_counter_per_class[class_name]
#print(rec)
prec = tp[:]
for idx, val in enumerate(tp):
prec[idx] = float(tp[idx]) / (fp[idx] + tp[idx])
#print(prec)
ap, mrec, mprec = voc_ap(rec, prec)
sum_AP += ap
text = "{0:.2f}%".format(ap*100) + " = " + class_name + " AP " #class_name + " AP = {0:.2f}%".format(ap*100)
"""
Write to results.txt
"""
rounded_prec = [ '%.2f' % elem for elem in prec ]
rounded_rec = [ '%.2f' % elem for elem in rec ]
results_file.write(text + "\n Precision: " + str(rounded_prec) + "\n Recall :" + str(rounded_rec) + "\n\n")
if not args.quiet:
print(text)
ap_dictionary[class_name] = ap
"""
Draw plot
"""
if draw_plot:
plt.plot(rec, prec, '-o')
# add a new penultimate point to the list (mrec[-2], 0.0)
# since the last line segment (and respective area) do not affect the AP value
area_under_curve_x = mrec[:-1] + [mrec[-2]] + [mrec[-1]]
area_under_curve_y = mprec[:-1] + [0.0] + [mprec[-1]]
plt.fill_between(area_under_curve_x, 0, area_under_curve_y, alpha=0.2, edgecolor='r')
# set window title
fig = plt.gcf() # gcf - get current figure
fig.canvas.set_window_title('AP ' + class_name)
# set plot title
plt.title('class: ' + text)
#plt.suptitle('This is a somewhat long figure title', fontsize=16)
# set axis titles
plt.xlabel('Recall')
plt.ylabel('Precision')
# optional - set axes
axes = plt.gca() # gca - get current axes
axes.set_xlim([0.0,1.0])
axes.set_ylim([0.0,1.05]) # .05 to give some extra space
# Alternative option -> wait for button to be pressed
#while not plt.waitforbuttonpress(): pass # wait for key display
# Alternative option -> normal display
#plt.show()
# save the plot
fig.savefig(results_files_path + "/classes/" + class_name + ".png")
plt.cla() # clear axes for next plot
if show_animation:
cv2.destroyAllWindows()
results_file.write("\n# mAP of all classes\n")
mAP = sum_AP / n_classes
text = "mAP = {0:.2f}%".format(mAP*100)
results_file.write(text + "\n")
print(text)
with open(results_files_path + "/results.txt", 'a') as results_file:
results_file.write("\n# Number of ground-truth objects per class\n")
for class_name in sorted(gt_counter_per_class):
results_file.write(class_name + ": " + str(gt_counter_per_class[class_name]) + "\n")
with open(results_files_path + "/results", 'a') as results_file:
results_file.write("\n# Number of predicted objects per class\n")
for class_name in sorted(pred_classes):
n_pred = pred_counter_per_class[class_name]
text = class_name + ": " + str(n_pred)
text += " (tp:" + str(count_true_positives[class_name]) + ""
text += ", fp:" + str(n_pred - count_true_positives[class_name]) + ")\n"
results_file.write(text)
def file_lines_to_list(path):
    """Read the text file at *path* and return its lines, whitespace-stripped."""
    with open(path) as f:
        return [line.strip() for line in f.readlines()]
180,991 | import glob
import json
import os
import shutil
import operator
import sys
import argparse
from absl import app, flags, logging
from absl.flags import FLAGS
def draw_text_in_image(img, text, pos, color, line_width):
    """Render *text* onto *img* at *pos* and return (img, updated line width).

    The returned width is *line_width* plus the rendered text's pixel width,
    so successive calls can lay text out left-to-right on one line.
    """
    font_face = cv2.FONT_HERSHEY_PLAIN
    font_scale = 1
    thickness = 1
    cv2.putText(img, text,
                pos,
                font_face,
                font_scale,
                color,
                thickness)
    (text_width, _text_height), _baseline = cv2.getTextSize(text, font_face, font_scale, thickness)
    return img, (line_width + text_width)
180,992 | import glob
import json
import os
import shutil
import operator
import sys
import argparse
from absl import app, flags, logging
from absl.flags import FLAGS
def adjust_axes(r, t, fig, axes):
    """Stretch the x-axis so the bar-value text *t* fits inside the figure.

    Args:
        r: Matplotlib renderer used to measure the text.
        t: The text artist drawn next to the longest bar.
        fig / axes: Current figure and axes to rescale.
    """
    bbox = t.get_window_extent(renderer=r)
    text_width_inches = bbox.width / fig.dpi
    current_width = fig.get_figwidth()
    proportion = (current_width + text_width_inches) / current_width
    x_min, x_max = axes.get_xlim()
    axes.set_xlim([x_min, x_max * proportion])
if len(ground_truth_files_list) == 0:
error("Error: No ground-truth files found!")
The provided code snippet includes necessary dependencies for implementing the `draw_plot_func` function. Write a Python function `def draw_plot_func(dictionary, n_classes, window_title, plot_title, x_label, output_path, to_show, plot_color, true_p_bar)` to solve the following problem:
Re-scale height accordingly
Here is the function:
def draw_plot_func(dictionary, n_classes, window_title, plot_title, x_label, output_path, to_show, plot_color, true_p_bar):
    """Render a horizontal bar chart of per-class values and save it to disk.

    dictionary  -- {class_name: numeric value} to plot (one bar per class).
    n_classes   -- number of bars (len(dictionary)).
    window_title / plot_title / x_label -- chart labels.
    output_path -- file path the figure image is saved to.
    to_show     -- if truthy, also display the figure interactively.
    plot_color  -- bar color used in the plain single-color mode.
    true_p_bar  -- "" for plain bars, or {class_name: true-positive count} to
                   split each bar into green (TP) / red (FP) segments.
    """
    # sort the dictionary by ascending value, into a list of tuples
    sorted_dic_by_value = sorted(dictionary.items(), key=operator.itemgetter(1))
    # unpacking the list of tuples into two lists
    sorted_keys, sorted_values = zip(*sorted_dic_by_value)
    #
    if true_p_bar != "":
        """
        Special case to draw in (green=true predictions) & (red=false predictions)
        """
        fp_sorted = []
        tp_sorted = []
        for key in sorted_keys:
            # false positives = total predictions minus true positives
            fp_sorted.append(dictionary[key] - true_p_bar[key])
            tp_sorted.append(true_p_bar[key])
        # red FP segment first, then the green TP segment stacked to its right
        plt.barh(range(n_classes), fp_sorted, align='center', color='crimson', label='False Predictions')
        plt.barh(range(n_classes), tp_sorted, align='center', color='forestgreen', label='True Predictions', left=fp_sorted)
        # add legend
        plt.legend(loc='lower right')
        """
        Write number on side of bar
        """
        fig = plt.gcf()  # gcf - get current figure
        axes = plt.gca()
        r = fig.canvas.get_renderer()
        for i, val in enumerate(sorted_values):
            fp_val = fp_sorted[i]
            tp_val = tp_sorted[i]
            fp_str_val = " " + str(fp_val)
            tp_str_val = fp_str_val + " " + str(tp_val)
            # trick to paint multicolor with offset:
            # first paint everything and then repaint the first number
            t = plt.text(val, i, tp_str_val, color='forestgreen', va='center', fontweight='bold')
            plt.text(val, i, fp_str_val, color='crimson', va='center', fontweight='bold')
            if i == (len(sorted_values)-1):  # largest bar
                adjust_axes(r, t, fig, axes)
    else:
        plt.barh(range(n_classes), sorted_values, color=plot_color)
        """
        Write number on side of bar
        """
        fig = plt.gcf()  # gcf - get current figure
        axes = plt.gca()
        r = fig.canvas.get_renderer()
        for i, val in enumerate(sorted_values):
            str_val = " " + str(val)  # add a space before
            if val < 1.0:
                str_val = " {0:.2f}".format(val)
            t = plt.text(val, i, str_val, color=plot_color, va='center', fontweight='bold')
            # re-set axes to show number inside the figure
            if i == (len(sorted_values)-1):  # largest bar
                adjust_axes(r, t, fig, axes)
    # set window title
    # NOTE(review): fig.canvas.set_window_title is deprecated in newer
    # Matplotlib (use fig.canvas.manager.set_window_title) — confirm the
    # pinned Matplotlib version before changing.
    fig.canvas.set_window_title(window_title)
    # write classes in y axis
    tick_font_size = 12
    plt.yticks(range(n_classes), sorted_keys, fontsize=tick_font_size)
    """
    Re-scale height accordingly
    """
    init_height = fig.get_figheight()
    # compute the matrix height in points and inches
    dpi = fig.dpi
    height_pt = n_classes * (tick_font_size * 1.4)  # 1.4 (some spacing)
    height_in = height_pt / dpi
    # compute the required figure height
    top_margin = 0.15  # in percentage of the figure height
    bottom_margin = 0.05  # in percentage of the figure height
    figure_height = height_in / (1 - top_margin - bottom_margin)
    # set new height only if the bars need more room than the default figure
    if figure_height > init_height:
        fig.set_figheight(figure_height)
    # set plot title
    plt.title(plot_title, fontsize=14)
    # set axis titles
    # plt.xlabel('classes')
    plt.xlabel(x_label, fontsize='large')
    # adjust size of window
    fig.tight_layout()
    # save the plot
    fig.savefig(output_path)
    # show image
    if to_show:
        plt.show()
    # close the plot
    plt.close()
180,993 | import cv2
import random
import colorsys
import numpy as np
import tensorflow as tf
from core.config import cfg
def load_freeze_layer(model='yolov4', tiny=False):
    """Return the names of the output conv layers to freeze for the given
    detector variant.  Any model name other than 'yolov3' is treated as
    yolov4 (matching the original behavior)."""
    if model == 'yolov3':
        return ['conv2d_9', 'conv2d_12'] if tiny else ['conv2d_58', 'conv2d_66', 'conv2d_74']
    return ['conv2d_17', 'conv2d_20'] if tiny else ['conv2d_93', 'conv2d_101', 'conv2d_109']
180,994 | import cv2
import random
import colorsys
import numpy as np
import tensorflow as tf
from core.config import cfg
def read_class_names(class_file_name):
    """Read class names (one per line) and return {index: name}.

    Only the trailing newline is stripped; other whitespace is preserved.
    """
    with open(class_file_name, 'r') as data:
        return {idx: line.strip('\n') for idx, line in enumerate(data)}
cfg = __C
def draw_bbox(image, bboxes, classes=None, show_label=True):
    """Draw detection boxes (and optional 'name: score' labels) on `image`.

    `bboxes` is the (boxes, scores, classes, valid_count) tuple produced by
    the detector; box coordinates are normalized to [0, 1] as
    [ymin, xmin, ymax, xmax].  Returns the annotated image (modified in place).
    """
    if classes is None:
        # Resolved lazily: the original evaluated read_class_names(...) as the
        # parameter default, which performed file I/O at import time.
        classes = read_class_names(cfg.YOLO.CLASSES)
    num_classes = len(classes)
    image_h, image_w, _ = image.shape
    # One distinct hue per class; shuffle with a fixed seed so colors are
    # stable across runs, then restore nondeterministic seeding.
    hsv_tuples = [(1.0 * x / num_classes, 1., 1.) for x in range(num_classes)]
    colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
    colors = list(map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)), colors))
    random.seed(0)
    random.shuffle(colors)
    random.seed(None)

    out_boxes, out_scores, out_classes, num_boxes = bboxes
    for i in range(num_boxes[0]):
        class_ind = int(out_classes[0][i])
        # Bug fix: the original tested `> num_classes`, which let
        # class_ind == num_classes through and crashed on colors[class_ind].
        if class_ind < 0 or class_ind >= num_classes: continue
        coor = out_boxes[0][i]
        # Scale the normalized coordinates to pixels.
        coor[0] = int(coor[0] * image_h)
        coor[2] = int(coor[2] * image_h)
        coor[1] = int(coor[1] * image_w)
        coor[3] = int(coor[3] * image_w)

        fontScale = 0.5
        score = out_scores[0][i]
        bbox_color = colors[class_ind]
        bbox_thick = int(0.6 * (image_h + image_w) / 600)
        c1, c2 = (coor[1], coor[0]), (coor[3], coor[2])
        cv2.rectangle(image, c1, c2, bbox_color, bbox_thick)

        if show_label:
            bbox_mess = '%s: %.2f' % (classes[class_ind], score)
            t_size = cv2.getTextSize(bbox_mess, 0, fontScale, thickness=bbox_thick // 2)[0]
            c3 = (c1[0] + t_size[0], c1[1] - t_size[1] - 3)
            cv2.rectangle(image, c1, (np.float32(c3[0]), np.float32(c3[1])), bbox_color, -1)  # filled
            cv2.putText(image, bbox_mess, (c1[0], np.float32(c1[1] - 2)), cv2.FONT_HERSHEY_SIMPLEX,
                        fontScale, (0, 0, 0), bbox_thick // 2, lineType=cv2.LINE_AA)
    return image
180,995 | import cv2
import random
import colorsys
import numpy as np
import tensorflow as tf
from core.config import cfg
The provided code snippet includes necessary dependencies for implementing the `bbox_ciou` function. Write a Python function `def bbox_ciou(bboxes1, bboxes2)` to solve the following problem:
Complete IoU @param bboxes1: (a, b, ..., 4) @param bboxes2: (A, B, ..., 4) x:X is 1:n or n:n or n:1 @return (max(a,A), max(b,B), ...) ex) (4,):(3,4) -> (3,) (2,1,4):(2,3,4) -> (2,3)
Here is the function:
def bbox_ciou(bboxes1, bboxes2):
    """
    Complete IoU
    @param bboxes1: (a, b, ..., 4)
    @param bboxes2: (A, B, ..., 4)
    x:X is 1:n or n:n or n:1
    @return (max(a,A), max(b,B), ...)
    ex) (4,):(3,4) -> (3,)
        (2,1,4):(2,3,4) -> (2,3)

    Boxes are in (cx, cy, w, h) format.  CIoU = IoU - rho^2/c^2 - alpha*v,
    penalizing center distance and aspect-ratio mismatch on top of plain IoU.
    """
    # Areas from width * height.
    bboxes1_area = bboxes1[..., 2] * bboxes1[..., 3]
    bboxes2_area = bboxes2[..., 2] * bboxes2[..., 3]
    # Convert (cx, cy, w, h) -> (x1, y1, x2, y2) corners.
    bboxes1_coor = tf.concat(
        [
            bboxes1[..., :2] - bboxes1[..., 2:] * 0.5,
            bboxes1[..., :2] + bboxes1[..., 2:] * 0.5,
        ],
        axis=-1,
    )
    bboxes2_coor = tf.concat(
        [
            bboxes2[..., :2] - bboxes2[..., 2:] * 0.5,
            bboxes2[..., :2] + bboxes2[..., 2:] * 0.5,
        ],
        axis=-1,
    )
    # Intersection rectangle; clamp to 0 when boxes do not overlap.
    left_up = tf.maximum(bboxes1_coor[..., :2], bboxes2_coor[..., :2])
    right_down = tf.minimum(bboxes1_coor[..., 2:], bboxes2_coor[..., 2:])
    inter_section = tf.maximum(right_down - left_up, 0.0)
    inter_area = inter_section[..., 0] * inter_section[..., 1]
    union_area = bboxes1_area + bboxes2_area - inter_area
    # divide_no_nan keeps degenerate (zero-union) pairs at 0 instead of NaN.
    iou = tf.math.divide_no_nan(inter_area, union_area)
    # Smallest axis-aligned box enclosing both boxes.
    enclose_left_up = tf.minimum(bboxes1_coor[..., :2], bboxes2_coor[..., :2])
    enclose_right_down = tf.maximum(
        bboxes1_coor[..., 2:], bboxes2_coor[..., 2:]
    )
    enclose_section = enclose_right_down - enclose_left_up
    # c^2: squared diagonal of the enclosing box.
    c_2 = enclose_section[..., 0] ** 2 + enclose_section[..., 1] ** 2
    # rho^2: squared distance between box centers.
    center_diagonal = bboxes2[..., :2] - bboxes1[..., :2]
    rho_2 = center_diagonal[..., 0] ** 2 + center_diagonal[..., 1] ** 2
    diou = iou - tf.math.divide_no_nan(rho_2, c_2)
    # v measures aspect-ratio inconsistency via the atan of w/h.
    v = (
        (
            tf.math.atan(
                tf.math.divide_no_nan(bboxes1[..., 2], bboxes1[..., 3])
            )
            - tf.math.atan(
                tf.math.divide_no_nan(bboxes2[..., 2], bboxes2[..., 3])
            )
        )
        * 2
        / np.pi
    ) ** 2
    # alpha is the trade-off weight for the aspect-ratio term.
    alpha = tf.math.divide_no_nan(v, 1 - iou + v)
    ciou = diou - alpha * v
    return ciou
180,996 | import cv2
import random
import colorsys
import numpy as np
import tensorflow as tf
from core.config import cfg
def bbox_iou(bboxes1, bboxes2):
    """
    Plain IoU of boxes in (cx, cy, w, h) format, with broadcasting.
    x:X is 1:n or n:n or n:1
    ex) (4,):(3,4) -> (3,)
        (2,1,4):(2,3,4) -> (2,3)
    """
    def to_corners(boxes):
        # (cx, cy, w, h) -> (x1, y1, x2, y2)
        half = boxes[..., 2:] * 0.5
        return tf.concat([boxes[..., :2] - half, boxes[..., :2] + half], axis=-1)

    area1 = bboxes1[..., 2] * bboxes1[..., 3]
    area2 = bboxes2[..., 2] * bboxes2[..., 3]
    corners1 = to_corners(bboxes1)
    corners2 = to_corners(bboxes2)
    # Intersection rectangle, clamped at zero when the boxes do not overlap.
    top_left = tf.maximum(corners1[..., :2], corners2[..., :2])
    bottom_right = tf.minimum(corners1[..., 2:], corners2[..., 2:])
    wh = tf.maximum(bottom_right - top_left, 0.0)
    overlap = wh[..., 0] * wh[..., 1]
    union = area1 + area2 - overlap
    # divide_no_nan keeps degenerate (zero-union) pairs at 0 instead of NaN.
    return tf.math.divide_no_nan(overlap, union)
The provided code snippet includes necessary dependencies for implementing the `nms` function. Write a Python function `def nms(bboxes, iou_threshold, sigma=0.3, method='nms')` to solve the following problem:
:param bboxes: (xmin, ymin, xmax, ymax, score, class) Note: soft-nms, https://arxiv.org/pdf/1704.04503.pdf https://github.com/bharatsingh430/soft-nms
Here is the function:
def nms(bboxes, iou_threshold, sigma=0.3, method='nms'):
    """
    :param bboxes: (xmin, ymin, xmax, ymax, score, class)
    Note: soft-nms, https://arxiv.org/pdf/1704.04503.pdf
          https://github.com/bharatsingh430/soft-nms
    Greedy per-class suppression: repeatedly keep the highest-scoring box and
    down-weight (or drop) the remaining boxes that overlap it.
    """
    kept = []
    for cls in list(set(bboxes[:, 5])):
        remaining = bboxes[bboxes[:, 5] == cls]
        while len(remaining) > 0:
            best_idx = np.argmax(remaining[:, 4])
            best = remaining[best_idx]
            kept.append(best)
            remaining = np.concatenate([remaining[:best_idx], remaining[best_idx + 1:]])
            overlap = bbox_iou(best[np.newaxis, :4], remaining[:, :4])
            weight = np.ones((len(overlap),), dtype=np.float32)

            assert method in ['nms', 'soft-nms']
            if method == 'nms':
                # hard suppression: zero out boxes above the IoU threshold
                weight[overlap > iou_threshold] = 0.0
            if method == 'soft-nms':
                # Gaussian decay of scores by overlap
                weight = np.exp(-(1.0 * overlap ** 2 / sigma))

            remaining[:, 4] = remaining[:, 4] * weight
            remaining = remaining[remaining[:, 4] > 0.]
    return kept
180,997 | import cv2
import random
import colorsys
import numpy as np
import tensorflow as tf
from core.config import cfg
def freeze_all(model, frozen=True):
    """Recursively set `trainable` on `model` and every nested Keras layer.

    `frozen=True` freezes the weights; `frozen=False` unfreezes them.
    """
    model.trainable = not frozen
    if not isinstance(model, tf.keras.Model):
        return
    for sub_layer in model.layers:
        freeze_all(sub_layer, frozen)
180,998 | import cv2
import random
import colorsys
import numpy as np
import tensorflow as tf
from core.config import cfg
def unfreeze_all(model, frozen=False):
    """Recursively mark `model` and every nested Keras layer trainable
    (the mirror of `freeze_all`)."""
    model.trainable = not frozen
    if not isinstance(model, tf.keras.Model):
        return
    for sub_layer in model.layers:
        unfreeze_all(sub_layer, frozen)
180,999 | import numpy as np
import tensorflow as tf
import core.utils as utils
import core.common as common
import core.backbone as backbone
from core.config import cfg
def decode_train(conv_output, output_size, NUM_CLASS, STRIDES, ANCHORS, i=0, XYSCALE=[1, 1, 1]):
    """Decode one raw YOLO head into absolute-pixel predictions for training.

    Reshapes the conv output to (batch, grid, grid, 3 anchors, 5+NUM_CLASS),
    then converts raw offsets to center-xy / wh in input-image pixels and
    applies sigmoid to objectness and class logits.
    Returns a tensor of the same layout: [xywh, conf, class probs].
    """
    conv_output = tf.reshape(conv_output,
                             (tf.shape(conv_output)[0], output_size, output_size, 3, 5 + NUM_CLASS))
    # Split the last axis into xy offsets, wh offsets, objectness, class logits.
    conv_raw_dxdy, conv_raw_dwdh, conv_raw_conf, conv_raw_prob = tf.split(conv_output, (2, 2, 1, NUM_CLASS),
                                                                          axis=-1)
    # Grid of cell indices, broadcast over batch and the 3 anchors.
    xy_grid = tf.meshgrid(tf.range(output_size), tf.range(output_size))
    xy_grid = tf.expand_dims(tf.stack(xy_grid, axis=-1), axis=2)  # [gx, gy, 1, 2]
    xy_grid = tf.tile(tf.expand_dims(xy_grid, axis=0), [tf.shape(conv_output)[0], 1, 1, 3, 1])
    xy_grid = tf.cast(xy_grid, tf.float32)
    # Scaled-sigmoid center offset (YOLOv4 "grid sensitivity" trick), then
    # scale from grid units to pixels by the stride of this head.
    pred_xy = ((tf.sigmoid(conv_raw_dxdy) * XYSCALE[i]) - 0.5 * (XYSCALE[i] - 1) + xy_grid) * \
              STRIDES[i]
    # Exponential wh relative to this head's anchor sizes.
    pred_wh = (tf.exp(conv_raw_dwdh) * ANCHORS[i])
    pred_xywh = tf.concat([pred_xy, pred_wh], axis=-1)
    pred_conf = tf.sigmoid(conv_raw_conf)
    pred_prob = tf.sigmoid(conv_raw_prob)
    return tf.concat([pred_xywh, pred_conf, pred_prob], axis=-1)
181,000 | import numpy as np
import tensorflow as tf
import core.utils as utils
import core.common as common
import core.backbone as backbone
from core.config import cfg
def compute_loss(pred, conv, label, bboxes, STRIDES, NUM_CLASS, IOU_LOSS_THRESH, i=0):
    """YOLO loss for one detection head.

    pred   -- decoded predictions (pixels) from decode_train for head `i`.
    conv   -- raw conv output of the same head (logits).
    label  -- per-cell training targets [xywh, objectness, one-hot classes].
    bboxes -- ground-truth boxes used to find each prediction's best IoU.
    Returns (giou_loss, conf_loss, prob_loss), each mean-over-batch of the
    per-image sums.
    """
    conv_shape = tf.shape(conv)
    batch_size = conv_shape[0]
    output_size = conv_shape[1]
    input_size = STRIDES[i] * output_size
    conv = tf.reshape(conv, (batch_size, output_size, output_size, 3, 5 + NUM_CLASS))
    # Raw logits for objectness and class scores (loss uses *_with_logits).
    conv_raw_conf = conv[:, :, :, :, 4:5]
    conv_raw_prob = conv[:, :, :, :, 5:]
    pred_xywh = pred[:, :, :, :, 0:4]
    pred_conf = pred[:, :, :, :, 4:5]
    label_xywh = label[:, :, :, :, 0:4]
    # respond_bbox is 1 where an object is assigned to this anchor cell.
    respond_bbox = label[:, :, :, :, 4:5]
    label_prob = label[:, :, :, :, 5:]
    giou = tf.expand_dims(utils.bbox_giou(pred_xywh, label_xywh), axis=-1)
    input_size = tf.cast(input_size, tf.float32)
    # Weight small boxes more heavily (2 - normalized box area).
    bbox_loss_scale = 2.0 - 1.0 * label_xywh[:, :, :, :, 2:3] * label_xywh[:, :, :, :, 3:4] / (input_size ** 2)
    giou_loss = respond_bbox * bbox_loss_scale * (1- giou)
    # Best IoU of every prediction against any ground-truth box.
    iou = utils.bbox_iou(pred_xywh[:, :, :, :, np.newaxis, :], bboxes[:, np.newaxis, np.newaxis, np.newaxis, :, :])
    max_iou = tf.expand_dims(tf.reduce_max(iou, axis=-1), axis=-1)
    # Background = unassigned AND not overlapping any GT above the threshold
    # (predictions in between are ignored by the confidence loss).
    respond_bgd = (1.0 - respond_bbox) * tf.cast( max_iou < IOU_LOSS_THRESH, tf.float32 )
    # Focal-style modulation of the confidence loss.
    conf_focal = tf.pow(respond_bbox - pred_conf, 2)
    conf_loss = conf_focal * (
            respond_bbox * tf.nn.sigmoid_cross_entropy_with_logits(labels=respond_bbox, logits=conv_raw_conf)
            +
            respond_bgd * tf.nn.sigmoid_cross_entropy_with_logits(labels=respond_bbox, logits=conv_raw_conf)
    )
    # Class loss only on cells responsible for an object.
    prob_loss = respond_bbox * tf.nn.sigmoid_cross_entropy_with_logits(labels=label_prob, logits=conv_raw_prob)
    giou_loss = tf.reduce_mean(tf.reduce_sum(giou_loss, axis=[1,2,3,4]))
    conf_loss = tf.reduce_mean(tf.reduce_sum(conf_loss, axis=[1,2,3,4]))
    prob_loss = tf.reduce_mean(tf.reduce_sum(prob_loss, axis=[1,2,3,4]))
    return giou_loss, conf_loss, prob_loss
181,001 | import tensorflow as tf
from absl import app, flags, logging
from absl.flags import FLAGS
from core.yolov4 import YOLO, decode, filter_boxes
import core.utils as utils
from core.config import cfg
def YOLO(input_layer, NUM_CLASS, model='yolov4', is_tiny=False):
    """Build the feature maps for the requested detector variant.

    model    -- 'yolov3' or 'yolov4'.
    is_tiny  -- select the tiny variant of the chosen model.
    Raises ValueError for an unknown model name (the original fell through
    and silently returned None, crashing later at the call site).
    """
    if is_tiny:
        if model == 'yolov4':
            return YOLOv4_tiny(input_layer, NUM_CLASS)
        elif model == 'yolov3':
            return YOLOv3_tiny(input_layer, NUM_CLASS)
    else:
        if model == 'yolov4':
            return YOLOv4(input_layer, NUM_CLASS)
        elif model == 'yolov3':
            return YOLOv3(input_layer, NUM_CLASS)
    raise ValueError("model must be 'yolov3' or 'yolov4', got %r" % (model,))
def decode(conv_output, output_size, NUM_CLASS, STRIDES, ANCHORS, i, XYSCALE=[1,1,1], FRAMEWORK='tf'):
    """Dispatch raw-head decoding to the framework-specific implementation.

    'trt' and 'tflite' get their dedicated decoders; anything else falls back
    to the plain TensorFlow decoder.
    """
    if FRAMEWORK == 'trt':
        decoder = decode_trt
    elif FRAMEWORK == 'tflite':
        decoder = decode_tflite
    else:
        decoder = decode_tf
    return decoder(conv_output, output_size, NUM_CLASS, STRIDES, ANCHORS, i=i, XYSCALE=XYSCALE)
def filter_boxes(box_xywh, scores, score_threshold=0.4, input_shape = tf.constant([416,416])):
    """Keep boxes whose best class score passes the threshold, and convert
    them to normalized corner coordinates.

    box_xywh -- (batch, N, 4) center-format boxes in pixels.
    scores   -- (batch, N, num_classes) class scores.
    Returns (boxes, pred_conf): boxes as (y_min, x_min, y_max, x_max)
    normalized by `input_shape`, plus the surviving per-class scores.
    """
    scores_max = tf.math.reduce_max(scores, axis=-1)
    mask = scores_max >= score_threshold
    class_boxes = tf.boolean_mask(box_xywh, mask)
    pred_conf = tf.boolean_mask(scores, mask)
    # Restore the batch dimension flattened by boolean_mask.
    class_boxes = tf.reshape(class_boxes, [tf.shape(scores)[0], -1, tf.shape(class_boxes)[-1]])
    pred_conf = tf.reshape(pred_conf, [tf.shape(scores)[0], -1, tf.shape(pred_conf)[-1]])
    box_xy, box_wh = tf.split(class_boxes, (2, 2), axis=-1)
    input_shape = tf.cast(input_shape, dtype=tf.float32)
    # Swap (x, y) -> (y, x) to match the TF NMS box convention.
    box_yx = box_xy[..., ::-1]
    box_hw = box_wh[..., ::-1]
    # Center +/- half-size, normalized to [0, 1] by the input resolution.
    box_mins = (box_yx - (box_hw / 2.)) / input_shape
    box_maxes = (box_yx + (box_hw / 2.)) / input_shape
    boxes = tf.concat([
        box_mins[..., 0:1],  # y_min
        box_mins[..., 1:2],  # x_min
        box_maxes[..., 0:1],  # y_max
        box_maxes[..., 1:2]  # x_max
    ], axis=-1)
    # return tf.concat([boxes, pred_conf], axis=-1)
    return (boxes, pred_conf)
def save_tf():
    """Build the detector from FLAGS, load Darknet weights, and export a
    TensorFlow SavedModel to FLAGS.output.

    For tiny models there are 2 heads (strides 16/32); for full models 3
    heads (strides 8/16/32).  For the 'tflite' framework the raw
    (boxes, probs) pair is exported; otherwise boxes are pre-filtered by
    score threshold and concatenated.
    """
    STRIDES, ANCHORS, NUM_CLASS, XYSCALE = utils.load_config(FLAGS)
    input_layer = tf.keras.layers.Input([FLAGS.input_size, FLAGS.input_size, 3])
    feature_maps = YOLO(input_layer, NUM_CLASS, FLAGS.model, FLAGS.tiny)
    bbox_tensors = []
    prob_tensors = []
    if FLAGS.tiny:
        # Two heads: grid sizes input/16 and input/32.
        for i, fm in enumerate(feature_maps):
            if i == 0:
                output_tensors = decode(fm, FLAGS.input_size // 16, NUM_CLASS, STRIDES, ANCHORS, i, XYSCALE, FLAGS.framework)
            else:
                output_tensors = decode(fm, FLAGS.input_size // 32, NUM_CLASS, STRIDES, ANCHORS, i, XYSCALE, FLAGS.framework)
            bbox_tensors.append(output_tensors[0])
            prob_tensors.append(output_tensors[1])
    else:
        # Three heads: grid sizes input/8, input/16 and input/32.
        for i, fm in enumerate(feature_maps):
            if i == 0:
                output_tensors = decode(fm, FLAGS.input_size // 8, NUM_CLASS, STRIDES, ANCHORS, i, XYSCALE, FLAGS.framework)
            elif i == 1:
                output_tensors = decode(fm, FLAGS.input_size // 16, NUM_CLASS, STRIDES, ANCHORS, i, XYSCALE, FLAGS.framework)
            else:
                output_tensors = decode(fm, FLAGS.input_size // 32, NUM_CLASS, STRIDES, ANCHORS, i, XYSCALE, FLAGS.framework)
            bbox_tensors.append(output_tensors[0])
            prob_tensors.append(output_tensors[1])
    pred_bbox = tf.concat(bbox_tensors, axis=1)
    pred_prob = tf.concat(prob_tensors, axis=1)
    if FLAGS.framework == 'tflite':
        # TFLite export keeps raw outputs; filtering happens post-inference.
        pred = (pred_bbox, pred_prob)
    else:
        boxes, pred_conf = filter_boxes(pred_bbox, pred_prob, score_threshold=FLAGS.score_thres, input_shape=tf.constant([FLAGS.input_size, FLAGS.input_size]))
        pred = tf.concat([boxes, pred_conf], axis=-1)
    model = tf.keras.Model(input_layer, pred)
    utils.load_weights(model, FLAGS.weights, FLAGS.model, FLAGS.tiny)
    model.summary()
    model.save(FLAGS.output)
181,002 | import sys
import os
from absl import app, flags
from absl.flags import FLAGS
from lxml import etree
def convert_annotation(list_txt, output_path, image_dir, anno_dir, class_names):
    """Convert VOC XML annotations for the image stems listed in `list_txt`
    into one line per image:

        <abs image path> xmin,ymin,xmax,ymax,class_idx [...]

    Processing stops at the first blank line in `list_txt` (which also
    covers end-of-file), matching the original behavior.
    """
    IMAGE_EXT = '.jpg'
    ANNO_EXT = '.xml'
    with open(list_txt, 'r') as f, open(output_path, 'w') as wf:
        for raw_line in f:
            stem = raw_line.strip()
            if not stem:
                break
            im_p = os.path.join(image_dir, stem + IMAGE_EXT)
            an_p = os.path.join(anno_dir, stem + ANNO_EXT)

            # Get annotation.
            root = etree.parse(an_p).getroot()
            boxes = root.xpath('//object/bndbox')
            labels = root.xpath('//object/name')
            box_annotations = []
            for box, label in zip(boxes, labels):
                class_idx = class_names.index(label.text)
                coords = [box.find(tag).text for tag in ('xmin', 'ymin', 'xmax', 'ymax')]
                box_annotations.append(','.join(str(c) for c in coords) + ',' + str(class_idx))

            wf.write(os.path.abspath(im_p) + ' ' + ' '.join(box_annotations) + '\n')
def convert_voc(image_dir, anno_dir, train_list_txt, val_list_txt, classes, train_output, val_output, no_val):
    """Convert the VOC train split (and, unless `no_val`, the val split) to
    flat annotation files via `convert_annotation`.

    Bug fix: the class-names file is now read from the `classes` parameter;
    the original ignored it and read the global FLAGS.classes.  The unused
    IMAGE_EXT/ANNO_EXT locals were removed.
    """
    class_names = [c.strip() for c in open(classes).readlines()]
    # Training set.
    convert_annotation(train_list_txt, train_output, image_dir, anno_dir, class_names)
    if no_val:
        return
    # Validation set.
    convert_annotation(val_list_txt, val_output, image_dir, anno_dir, class_names)
181,003 | import sys
import os
from absl import app, flags
from absl.flags import FLAGS
from lxml import etree
def make_names(anno_dir, output):
    """Collect every <object>/<name> label from the XML files under
    `anno_dir` and write the sorted, de-duplicated list to `output`
    (one label per line)."""
    # A set replaces the original dict-with-dummy-values de-duplication.
    labels = set()
    for anno_file in os.listdir(anno_dir):
        p = os.path.join(anno_dir, anno_file)
        # Get annotation.
        root = etree.parse(p).getroot()
        for name_node in root.xpath('//object/name'):
            labels.add(name_node.text)
    with open(output, 'w') as f:
        for label in sorted(labels):
            # Bug fix: the original called f.writelines(l + '\n'), which only
            # worked by accident (writelines iterates the string per character).
            f.write(label + '\n')
    print(f"Done making a names's file ({os.path.abspath(output)})")
181,004 | from absl import app, flags, logging
import os
import pickle
from os import listdir
from os.path import isfile, join
from absl.flags import FLAGS
import cv2
def convert_annotation(output, data, data_type = "val"):
    """Write one annotation line per COCO image found in `data`:

        <image path> xmin,ymin,xmax,ymax,class_idx [...]

    `data` maps image-file stems to {'objects': {...}} dicts; COCO class
    names missing from the class-names file are mapped to their VOC
    equivalents via `replace_dict`.  `data_type` is currently unused (the
    train/val path selection it controlled is commented out below).
    """
    class_names = [c.strip() for c in open(FLAGS.classes).readlines()]
    # COCO -> VOC class-name aliases.
    replace_dict = {"couch": "sofa", "airplane": "aeroplane", "tv": "tvmonitor", "motorcycle": "motorbike"}
    if os.path.exists(output): os.remove(output)
    directory_path = os.path.join(FLAGS.coco_path, FLAGS.image_path)
    # if data_type == "train":
    #     anno_path = directory_path + "/labels/train2014"
    #     image_path = os.path.join(directory_path, "trainvalno5k.txt")
    # else:
    #     anno_path = directory_path + "/labels/val2014"
    #     image_path = os.path.join(directory_path, "5k.txt")
    # with open(image_path) as f:
    #     image_paths = f.readlines()
    #     image_paths = [x.strip() for x in image_paths]
    image_paths = [f for f in listdir(directory_path) if isfile(join(directory_path, f))]
    check_classes = []
    count = 0
    with open(output, 'a') as f:
        for image_path in image_paths:
            # Stem of the filename is the key into `data`.
            image_inds = image_path.split(".")[0]
            annotation = os.path.join(directory_path, image_path)
            # if os.path.exists(os.path.join(anno_path, image_inds + ".txt")):
            if image_inds in data:
                objects = data[image_inds]["objects"]
                for key, value in objects.items():
                    if key == 'num_obj': continue
                    if value["name"] not in class_names:
                        # Translate the COCO name, then look up its index.
                        class_ind = replace_dict[value["name"]]
                        class_ind = class_names.index(class_ind)
                        # if value["name"] not in check_classes:
                        #     check_classes.append(value["name"])
                        #     print(value["name"])
                        # continue
                    else:
                        class_ind = class_names.index(value["name"])
                    xmin = int(value["bndbox"]["xmin"])
                    xmax = int(value["bndbox"]["xmax"])
                    ymin = int(value["bndbox"]["ymin"])
                    ymax = int(value["bndbox"]["ymax"])
                    annotation += ' ' + ','.join([str(xmin), str(ymin), str(xmax), str(ymax), str(class_ind)])
            else: continue
            f.write(annotation + "\n")
            count += 1
            # print(annotation)
    # Number of images actually written.
    print(count)
    return
181,005 | import os
import time
def gdrive_download(id='1HaXkef9z6y5l4vUnCYgdmEAj61c6bfWO', name='coco.zip'):
    """Download a file from Google Drive via curl, handling the large-file
    confirmation cookie, then unzip it if it is a .zip archive.

    Returns the shell exit status of the download (0 on success).
    SECURITY NOTE(review): `id` and `name` are interpolated into shell
    commands run through os.system — only call this with trusted values.
    """
    # https://gist.github.com/tanaikech/f0f2d122e05bf5f971611258c22c110f
    # Downloads a file from Google Drive, accepting presented query
    # from utils.google_utils import *; gdrive_download()
    t = time.time()
    print('Downloading https://drive.google.com/uc?export=download&id=%s as %s... ' % (id, name), end='')
    os.remove(name) if os.path.exists(name) else None  # remove existing
    os.remove('cookie') if os.path.exists('cookie') else None
    # Attempt file download
    os.system("curl -c ./cookie -s -L \"https://drive.google.com/uc?export=download&id=%s\" > /dev/null" % id)
    if os.path.exists('cookie'):  # large file
        # Large files require echoing back the confirmation token stored
        # in the cookie file.
        s = "curl -Lb ./cookie \"https://drive.google.com/uc?export=download&confirm=`awk '/download/ {print $NF}' ./cookie`&id=%s\" -o %s" % (
            id, name)
    else:  # small file
        s = "curl -s -L -o %s 'https://drive.google.com/uc?export=download&id=%s'" % (name, id)
    r = os.system(s)  # execute, capture return values
    os.remove('cookie') if os.path.exists('cookie') else None
    # Error check
    if r != 0:
        os.remove(name) if os.path.exists(name) else None  # remove partial
        print('Download error ')  # raise Exception('Download error')
        return r
    # Unzip if archive
    if name.endswith('.zip'):
        print('unzipping... ', end='')
        os.system('unzip -q %s' % name)  # unzip
        os.remove(name)  # remove zip to free space
    print('Done (%.1fs)' % (time.time() - t))
    return r
181,006 | import os
import argparse
import xml.etree.ElementTree as ET
def convert_voc_annotation(data_path, data_type, anno_path, use_difficult_bbox=True):
    """Append one line per image of a VOC split to `anno_path`:

        <image path> xmin,ymin,xmax,ymax,class_idx [...]

    `data_type` selects the split file under ImageSets/Main.  Objects marked
    difficult are skipped when `use_difficult_bbox` is False.  Each line is
    also echoed to stdout.  Returns the number of images in the split.
    """
    classes = ['aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus',
               'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse',
               'motorbike', 'person', 'pottedplant', 'sheep', 'sofa',
               'train', 'tvmonitor']
    img_inds_file = os.path.join(data_path, 'ImageSets', 'Main', data_type + '.txt')
    with open(img_inds_file, 'r') as f:
        image_inds = [line.strip() for line in f.readlines()]

    with open(anno_path, 'a') as f:
        for image_ind in image_inds:
            # Start the line with the image path, then append each box.
            annotation = os.path.join(data_path, 'JPEGImages', image_ind + '.jpg')
            label_path = os.path.join(data_path, 'Annotations', image_ind + '.xml')
            root = ET.parse(label_path).getroot()
            for obj in root.findall('object'):
                difficult = obj.find('difficult').text.strip()
                if (not use_difficult_bbox) and (int(difficult) == 1):
                    continue
                bbox = obj.find('bndbox')
                class_ind = classes.index(obj.find('name').text.lower().strip())
                coords = [bbox.find(tag).text.strip() for tag in ('xmin', 'ymin', 'xmax', 'ymax')]
                annotation += ' ' + ','.join(coords + [str(class_ind)])
            print(annotation)
            f.write(annotation + "\n")
    return len(image_inds)
181,007 | from absl import app, flags, logging
from absl.flags import FLAGS
import tensorflow as tf
if len(physical_devices) > 0:
tf.config.experimental.set_memory_growth(physical_devices[0], True)
import numpy as np
import cv2
from tensorflow.python.compiler.tensorrt import trt_convert as trt
import core.utils as utils
from tensorflow.python.saved_model import signature_constants
import os
from tensorflow.compat.v1 import ConfigProto
from tensorflow.compat.v1 import InteractiveSession
def representative_data_gen():
    """Yield one batch of preprocessed calibration images for TF-TRT INT8
    quantization.

    Reads up to FLAGS.loop image paths from FLAGS.dataset; missing files
    leave their slot in the batch as zeros.  Yields a single
    (FLAGS.loop, input_size, input_size, 3) float32 tensor wrapped in a
    1-tuple, as the calibration API expects.
    """
    fimage = open(FLAGS.dataset).read().split()
    batched_input = np.zeros((FLAGS.loop, FLAGS.input_size, FLAGS.input_size, 3), dtype=np.float32)
    for input_value in range(FLAGS.loop):
        if os.path.exists(fimage[input_value]):
            original_image=cv2.imread(fimage[input_value])
            original_image = cv2.cvtColor(original_image, cv2.COLOR_BGR2RGB)
            # Resize/pad/normalize to the network input size.
            image_data = utils.image_preporcess(np.copy(original_image), [FLAGS.input_size, FLAGS.input_size])
            img_in = image_data[np.newaxis, ...].astype(np.float32)
            batched_input[input_value, :] = img_in
            # batched_input = tf.constant(img_in)
            print(input_value)
            # yield (batched_input, )
            # yield tf.random.normal((1, 416, 416, 3)),
        else:
            continue
    batched_input = tf.constant(batched_input)
    yield (batched_input,)
def save_trt():
    """Convert the SavedModel at FLAGS.weights to TF-TRT at the precision
    selected by FLAGS.quantize_mode ('int8', 'float16', or FP32 fallback),
    save it to FLAGS.output, then reload it and print a per-node summary of
    which graph nodes were fused into TRTEngineOp engines."""
    if FLAGS.quantize_mode == 'int8':
        # INT8 needs a calibration pass over representative data.
        conversion_params = trt.DEFAULT_TRT_CONVERSION_PARAMS._replace(
            precision_mode=trt.TrtPrecisionMode.INT8,
            max_workspace_size_bytes=4000000000,
            use_calibration=True,
            max_batch_size=8)
        converter = trt.TrtGraphConverterV2(
            input_saved_model_dir=FLAGS.weights,
            conversion_params=conversion_params)
        converter.convert(calibration_input_fn=representative_data_gen)
    elif FLAGS.quantize_mode == 'float16':
        conversion_params = trt.DEFAULT_TRT_CONVERSION_PARAMS._replace(
            precision_mode=trt.TrtPrecisionMode.FP16,
            max_workspace_size_bytes=4000000000,
            max_batch_size=8)
        converter = trt.TrtGraphConverterV2(
            input_saved_model_dir=FLAGS.weights, conversion_params=conversion_params)
        converter.convert()
    else :
        # Default: full-precision FP32 conversion.
        conversion_params = trt.DEFAULT_TRT_CONVERSION_PARAMS._replace(
            precision_mode=trt.TrtPrecisionMode.FP32,
            max_workspace_size_bytes=4000000000,
            max_batch_size=8)
        converter = trt.TrtGraphConverterV2(
            input_saved_model_dir=FLAGS.weights, conversion_params=conversion_params)
        converter.convert()
    # converter.build(input_fn=representative_data_gen)
    converter.save(output_saved_model_dir=FLAGS.output)
    print('Done Converting to TF-TRT')

    # Reload the converted model and report which nodes became TRT engines.
    saved_model_loaded = tf.saved_model.load(FLAGS.output)
    graph_func = saved_model_loaded.signatures[
        signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY]
    trt_graph = graph_func.graph.as_graph_def()
    for n in trt_graph.node:
        print(n.op)
        if n.op == "TRTEngineOp":
            print("Node: %s, %s" % (n.op, n.name.replace("/", "_")))
        else:
            print("Exclude Node: %s, %s" % (n.op, n.name.replace("/", "_")))
    logging.info("model saved to: {}".format(FLAGS.output))

    trt_engine_nodes = len([1 for n in trt_graph.node if str(n.op) == 'TRTEngineOp'])
    print("numb. of trt_engine_nodes in TensorRT graph:", trt_engine_nodes)
    all_nodes = len([1 for n in trt_graph.node])
    print("numb. of all_nodes in TensorRT graph:", all_nodes)
181,008 | from backtesting.test import EURUSD, SMA
upper, lower = BBANDS(data, 20, 2)
import numpy as np
import pandas as pd
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import train_test_split
from backtesting import Backtest, Strategy
The provided code snippet includes necessary dependencies for implementing the `BBANDS` function. Write a Python function `def BBANDS(data, n_lookback, n_std)` to solve the following problem:
Bollinger bands indicator
Here is the function:
def BBANDS(data, n_lookback, n_std):
    """Bollinger bands indicator.

    Returns (upper, lower) bands: the rolling mean of the typical price
    (H+L+C)/3 over `n_lookback` bars, +/- `n_std` rolling standard deviations.
    """
    typical_price = (data.High + data.Low + data.Close) / 3
    rolling = typical_price.rolling(n_lookback)
    mean = rolling.mean()
    spread = n_std * rolling.std()
    return mean + spread, mean - spread
181,009 | from backtesting.test import EURUSD, SMA
import numpy as np
def get_X(data):
    """Return model design matrix X: all columns whose name contains 'X'."""
    return data.filter(like='X').to_numpy()
def get_y(data):
"""Return dependent variable y"""
y = data.Close.pct_change(48).shift(-48) # Returns after roughly two days
y[y.between(-.004, .004)] = 0 # Devalue returns smaller than 0.4%
y[y > 0] = 1
y[y < 0] = -1
return y
import pandas as pd
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import train_test_split
X, y = get_clean_Xy(data)
from backtesting import Backtest, Strategy
The provided code snippet includes necessary dependencies for implementing the `get_clean_Xy` function. Write a Python function `def get_clean_Xy(df)` to solve the following problem:
Return (X, y) cleaned of NaN values
Here is the function:
def get_clean_Xy(df):
    """Return (X, y) cleaned of NaN values."""
    X = get_X(df)
    y = get_y(df).values
    keep = ~np.isnan(y)
    return X[keep], y[keep]
181,010 | from backtesting.test import GOOG
import pandas as pd
from backtesting import Strategy
from backtesting.lib import crossover
from backtesting import Backtest
The provided code snippet includes necessary dependencies for implementing the `SMA` function. Write a Python function `def SMA(values, n)` to solve the following problem:
Return simple moving average of `values`, at each step taking into account `n` previous values.
Here is the function:
def SMA(values, n):
    """
    Return simple moving average of `values`, at
    each step taking into account `n` previous values.
    """
    series = pd.Series(values)
    window = series.rolling(window=n)
    return window.mean()
181,011 | import pandas as pd
from backtesting import Strategy, Backtest
from backtesting.lib import resample_apply
from backtesting.test import GOOG
The provided code snippet includes necessary dependencies for implementing the `SMA` function. Write a Python function `def SMA(array, n)` to solve the following problem:
Simple moving average
Here is the function:
def SMA(array, n):
    """Simple moving average over a window of `n` observations."""
    rolling_window = pd.Series(array).rolling(n)
    return rolling_window.mean()
181,012 | import pandas as pd
from backtesting import Strategy, Backtest
from backtesting.lib import resample_apply
from backtesting.test import GOOG
The provided code snippet includes necessary dependencies for implementing the `RSI` function. Write a Python function `def RSI(array, n)` to solve the following problem:
Relative strength index
Here is the function:
def RSI(array, n):
    """Relative strength index (approximate; good enough)."""
    delta = pd.Series(array).diff()
    # Split the one-step price changes into gains and losses.
    gains = delta.clip(lower=0)
    losses = delta.clip(upper=0)
    rs = gains.ewm(n).mean() / losses.abs().ewm(n).mean()
    return 100 - 100 / (1 + rs)
181,013 | import re
import os
import json
import execjs
import pickle
import platform
import requests
from datetime import datetime
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
The provided code snippet includes necessary dependencies for implementing the `save_cookies` function. Write a Python function `def save_cookies(login_cookies)` to solve the following problem:
保存cookies
Here is the function:
def save_cookies(login_cookies):
    """Persist the login cookies to ``cookies.pkl`` in the working directory."""
    with open('cookies.pkl', 'wb') as cookie_file:
        pickle.dump(login_cookies, cookie_file)
181,014 | import re
import os
import json
import execjs
import pickle
import platform
import requests
from datetime import datetime
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
The provided code snippet includes necessary dependencies for implementing the `load_cookies` function. Write a Python function `def load_cookies()` to solve the following problem:
读取保存的cookies
Here is the function:
def load_cookies():
""" 读取保存的cookies """
try:
with open('cookies.pkl', 'rb') as fr:
cookies = pickle.load(fr)
return cookies
except Exception as e:
print('-' * 10, '加载cookies失败', '-' * 10)
print(e) | 读取保存的cookies |
181,015 | import re
import os
import json
import execjs
import pickle
import platform
import requests
from datetime import datetime
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
def check_login_status(login_cookies):
""" 检测是否登录成功 """
personal_title = '我的大麦-个人信息'
headers = {
'authority': 'passport.damai.cn',
'cache-control': 'max-age=0',
'sec-ch-ua': '" Not A;Brand";v="99", "Chromium";v="99", "Google Chrome";v="99"',
'sec-ch-ua-mobile': '?0',
'sec-ch-ua-platform': '"macOS"',
'upgrade-insecure-requests': '1',
'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/99.0.4844.83 Safari/537.36',
'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
'sec-fetch-site': 'same-origin',
'sec-fetch-mode': 'navigate',
'sec-fetch-user': '?1',
'sec-fetch-dest': 'document',
'referer': 'https://passport.damai.cn/login?ru=https://passport.damai.cn/accountinfo/myinfo',
'accept-language': 'zh,en;q=0.9,en-US;q=0.8,zh-CN;q=0.7',
}
response = requests.get('https://passport.damai.cn/accountinfo/myinfo',
headers=headers,
cookies=login_cookies)
personal_info = BeautifulSoup(response.text, 'html.parser')
if personal_info.title.text == personal_title:
return True
else:
return False
The provided code snippet includes necessary dependencies for implementing the `account_login` function. Write a Python function `def account_login(login_type: str, login_id=None, login_password=None)` to solve the following problem:
登录大麦网 :param login_id: :param login_password: :param login_type: 选择哪种方式进行登录 :return:
Here is the function:
def account_login(login_type: str, login_id=None, login_password=None):
"""
登录大麦网
:param login_id:
:param login_password:
:param login_type: 选择哪种方式进行登录
:return:
"""
damai_title = '大麦网-全球演出赛事官方购票平台-100%正品、先付先抢、在线选座!'
login_url = 'https://passport.damai.cn/login'
option = webdriver.ChromeOptions() # 默认Chrome浏览器
# 关闭开发者模式, window.navigator.webdriver 控件检测到你是selenium进入,若关闭会导致出现滑块并无法进入。
option.add_experimental_option('excludeSwitches', ['enable-automation'])
option.add_argument('--disable-blink-features=AutomationControlled')
# option.add_argument('headless') # Chrome以后台模式进行,注释以进行调试
# option.add_argument('window-size=1920x1080') # 指定分辨率
# option.add_argument('no-sandbox') # 取消沙盒模式
# option.add_argument('--disable-gpu') # 禁用GPU加速
# option.add_argument('disable-dev-shm-usage') # 大量渲染时候写入/tmp而非/dev/shm
if platform.system().lower() == 'linux':
chromedriver = os.path.join(os.getcwd(), 'chromedriver_linux')
elif platform.system().lower() == 'windows':
chromedriver = os.path.join(os.getcwd(), 'chromedriver_windows')
else:
chromedriver = os.path.join(os.getcwd(), 'chromedriver_mac')
driver = webdriver.Chrome(chromedriver, options=option)
driver.set_page_load_timeout(60)
driver.get(login_url)
if login_type == 'account':
driver.switch_to.frame('alibaba-login-box') # 切换内置frame,否则会找不到元素位置
driver.find_element_by_name('fm-login-id').send_keys(login_id)
driver.find_element_by_name('fm-login-password').send_keys(login_password)
driver.find_element_by_class_name('password-login').send_keys(Keys.ENTER)
WebDriverWait(driver, 180, 0.5).until(EC.title_contains(damai_title))
login_cookies = {}
if driver.title != damai_title:
print('登录异常,请检查页面登录提示信息')
for cookie in driver.get_cookies():
login_cookies[cookie['name']] = cookie['value']
if check_login_status(login_cookies):
return login_cookies | 登录大麦网 :param login_id: :param login_password: :param login_type: 选择哪种方式进行登录 :return: |
181,016 | import re
import os
import json
import execjs
import pickle
import platform
import requests
from datetime import datetime
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
The provided code snippet includes necessary dependencies for implementing the `get_api_param` function. Write a Python function `def get_api_param()` to solve the following problem:
获取请求大麦API所必须的一些参数, 可能大麦网js代码更新后需要修改此函数内的代码以重新获得参数信息
Here is the function:
def get_api_param():
""" 获取请求大麦API所必须的一些参数, 可能大麦网js代码更新后需要修改此函数内的代码以重新获得参数信息 """
def format_param(context):
param = []
for line in context.split(','):
k, v = line.split(':')
param.append('"{}":{}'.format(k, v))
param = json.loads('{' + ','.join(param) + '}')
return param
js_code_define = requests.get(
"https://g.alicdn.com/damai/??/vue-pc/0.0.70/vendor.js,vue-pc/0.0.70/perform/perform.js").text
# 获取商品SKU的API参数
commodity_param = re.search('getSkuData:function.*?\|\|""}}', js_code_define).group()
commodity_param = re.search('data:{.*?\|\|""}}', commodity_param).group()
commodity_param = commodity_param.replace('data:{', ''). \
replace('this.vmSkuData.privilegeId||""}}', '""'). \
replace('itemId:e', 'itemId:""')
commodity_param = format_param(commodity_param)
# 获取订单购买用户的API参数
ex_params = re.search(',i=Z}else{.*?;e&&', js_code_define).group()
ex_params = re.search('{.*}', ex_params).group()
ex_params = ex_params.replace('{var u=', '')[1:-1]
ex_params = format_param(ex_params)
return commodity_param, ex_params | 获取请求大麦API所必须的一些参数, 可能大麦网js代码更新后需要修改此函数内的代码以重新获得参数信息 |
181,017 | import re
import os
import json
import execjs
import pickle
import platform
import requests
from datetime import datetime
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
The provided code snippet includes necessary dependencies for implementing the `get_select_seat_params` function. Write a Python function `def get_select_seat_params(item_id, data_id=None)` to solve the following problem:
获取座位信息的必备参数
Here is the function:
def get_select_seat_params(item_id, data_id=None):
""" 获取座位信息的必备参数 """
headers = {
'authority': 'detail.damai.cn',
'accept': '*/*',
'accept-language': 'zh,en;q=0.9,en-US;q=0.8,zh-CN;q=0.7',
'referer': 'https://detail.damai.cn/item.htm?spm=a2oeg.home.card_1.ditem_1.591b23e1qozgyw&id=671100996170',
'sec-ch-ua': '" Not A;Brand";v="99", "Chromium";v="100", "Google Chrome";v="100"',
'sec-ch-ua-mobile': '?0',
'sec-ch-ua-platform': '"macOS"',
'sec-fetch-dest': 'script',
'sec-fetch-mode': 'no-cors',
'sec-fetch-site': 'same-origin',
'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/100.0.4896.127 Safari/537.36',
}
params = {"itemId": item_id,
"dataId": data_id,
"dataType": 4,
"apiVersion": 2.0,
"dmChannel": "damai_pc",
"bizCode": "ali.china.damai",
"scenario": "itemsku"
}
response = requests.get('https://detail.damai.cn/subpage', headers=headers, params=params)
if response.status_code == 200:
result = json.loads(response.text[5:-1])
item_basic_info = result.get('itemBasicInfo')
city_id = item_basic_info.get('nationalStandardCityId')
project_id = item_basic_info.get('projectId')
item_id = item_basic_info.get('itemId')
perform_id = result.get('perform').get('performId')
return city_id, project_id, item_id, perform_id | 获取座位信息的必备参数 |
181,018 | import re
import os
import json
import execjs
import pickle
import platform
import requests
from datetime import datetime
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
def get_sign_code(h5_token: str, time_stamp, api_param) -> str:
"""
返回请求选座信息接口必备的sign信息
:return:
"""
node = execjs.get()
with open('signcode.js', 'r', encoding='utf-8') as f:
js_file = f.read()
js_exec = node.compile(js_file)
param1 = '{}&{}&12574478&'.format(h5_token, time_stamp)
context = param1 + api_param
sign_code = js_exec.call('calcaulate', context)
return sign_code
The provided code snippet includes necessary dependencies for implementing the `get_seat_dynamic_info` function. Write a Python function `def get_seat_dynamic_info(cookies, project_id, item_id, perform_id)` to solve the following problem:
获取 standId, 用于获取所有座位信息
Here is the function:
def get_seat_dynamic_info(cookies, project_id, item_id, perform_id):
""" 获取 standId, 用于获取所有座位信息 """
headers = {
'authority': 'mtop.damai.cn',
'accept': 'application/json',
'accept-language': 'zh,en;q=0.9,en-US;q=0.8,zh-CN;q=0.7',
'content-type': 'application/x-www-form-urlencoded',
# Requests sorts cookies= alphabetically
# 'cookie': 'cna=QAvhGhruGS4CAXx+ibfGKnY/; t=1ff77fc3f03114d784e6055f2e58128e; damai.cn_email=23964951@qq.com; damai.cn_nickName=MakiNaruto; munb=4031179480; xlly_s=1; cookie2=109b25aa6388dfbc71b8d6cb05dbb154; _tb_token_=3e87e37a17bde; _samesite_flag_=true; _hvn_login=18; csg=21db6663; damai.cn_user=SHm/AXRMF7mxpN8uip9sNS+4EH/qiS5ef3Q/K/+slykinDgISXh0XsCSZVMGSCKgGxb2+Rjuqig=; damai.cn_user_new=SHm%2FAXRMF7mxpN8uip9sNS%2B4EH%2FqiS5ef3Q%2FK%2F%2BslykinDgISXh0XsCSZVMGSCKgGxb2%2BRjuqig%3D; h5token=3936e2bf88964af2a37c20b092c61c75_1_1; damai_cn_user=SHm%2FAXRMF7mxpN8uip9sNS%2B4EH%2FqiS5ef3Q%2FK%2F%2BslykinDgISXh0XsCSZVMGSCKgGxb2%2BRjuqig%3D; loginkey=3936e2bf88964af2a37c20b092c61c75_1_1; user_id=108050604; _m_h5_tk=2ef39e419fe42af48f9fb3adc7e043df_1651324694423; _m_h5_tk_enc=a442fe5379084c1830b4418f456f7fb3; tfstk=c4McBWjbVz_B4DFoOKwXCmEadKORZ9Wa-AkSUn0_9Hotq0MPi_CPTJd2qrYBu11..; l=eBgKXWFnLg3Eg5F3BOfwourza77OSIRAguPzaNbMiOCPOTfp5_f1W6qQzd89C3GNh6zeR3J8Iu2zBeYBcSvEdvNX0cWf96Hmn; isg=BEhIJmYUvgYlEtLl2khALh8EGbBa8az7J9rUOgL5lEO23ehHqgF8i95bVb2tbWTT',
'origin': 'https://seatsvc.damai.cn',
'referer': 'https://seatsvc.damai.cn/',
'sec-ch-ua': '" Not A;Brand";v="99", "Chromium";v="100", "Google Chrome";v="100"',
'sec-ch-ua-mobile': '?0',
'sec-ch-ua-platform': '"macOS"',
'sec-fetch-dest': 'empty',
'sec-fetch-mode': 'cors',
'sec-fetch-site': 'same-site',
'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/100.0.4896.127 Safari/537.36',
}
h5_token = cookies.get('_m_h5_tk').split('_')[0]
time_stamp = int(datetime.now().timestamp() * 1000)
api_param = json.dumps({'projectId': project_id,
'performanceId': perform_id,
'itemId': item_id,
'hasPromotion': 'true',
'dmChannel': 'pc@damai_pc'}).replace(' ', '')
sign_code = get_sign_code(h5_token, time_stamp, api_param)
params = {
'jsv': '2.6.0',
'appKey': '12574478',
't': time_stamp,
'sign': sign_code,
'type': 'originaljson',
'dataType': 'json',
'v': '1.0',
'H5Request': 'true',
'AntiCreep': 'true',
'AntiFlood': 'true',
'api': 'mtop.damai.wireless.seat.dynamicinfo',
'data': api_param,
}
response = requests.get('https://mtop.damai.cn/h5/mtop.damai.wireless.seat.dynamicinfo/1.0/', params=params,
cookies=cookies, headers=headers)
if response.status_code == 200:
result = json.loads(response.text).get('data')
stand_id = result.get('standColorList')[0].get('standId')
seat_price_list = result.get('priceList')
return stand_id, seat_price_list | 获取 standId, 用于获取所有座位信息 |
181,019 | import re
import os
import json
import execjs
import pickle
import platform
import requests
from datetime import datetime
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
def get_sign_code(h5_token: str, time_stamp, api_param) -> str:
"""
返回请求选座信息接口必备的sign信息
:return:
"""
node = execjs.get()
with open('signcode.js', 'r', encoding='utf-8') as f:
js_file = f.read()
js_exec = node.compile(js_file)
param1 = '{}&{}&12574478&'.format(h5_token, time_stamp)
context = param1 + api_param
sign_code = js_exec.call('calcaulate', context)
return sign_code
The provided code snippet includes necessary dependencies for implementing the `get_select_seat_api` function. Write a Python function `def get_select_seat_api(cookies, perform_id, city_id)` to solve the following problem:
得到请求所有座位信息的api地址
Here is the function:
def get_select_seat_api(cookies, perform_id, city_id):
""" 得到请求所有座位信息的api地址 """
h5_token = cookies.get('_m_h5_tk').split('_')[0]
time_stamp = int(datetime.now().timestamp() * 1000)
api_param = json.dumps({"cityId": city_id,
"pfId": 2147483647 ^ int(perform_id),
"excludestatus": True,
"svgEncVer": "1.0",
"dmChannel": "pc@damai_pc"}).replace(' ', '')
sign_code = get_sign_code(h5_token, time_stamp, api_param)
headers = {
'authority': 'mtop.damai.cn',
'accept': 'application/json',
'accept-language': 'zh,en;q=0.9,en-US;q=0.8,zh-CN;q=0.7',
'content-type': 'application/x-www-form-urlencoded',
'origin': 'https://seatsvc.damai.cn',
'referer': 'https://seatsvc.damai.cn/',
'sec-ch-ua': '" Not A;Brand";v="99", "Chromium";v="100", "Google Chrome";v="100"',
'sec-ch-ua-mobile': '?0',
'sec-ch-ua-platform': '"macOS"',
'sec-fetch-dest': 'empty',
'sec-fetch-mode': 'cors',
'sec-fetch-site': 'same-site',
'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/100.0.4896.127 Safari/537.36',
}
params = {
'jsv': '2.6.0',
'appKey': '12574478',
't': time_stamp,
'sign': sign_code,
'type': 'originaljson',
'dataType': 'json',
'v': '1.3',
'H5Request': 'true',
'AntiCreep': 'true',
'AntiFlood': 'true',
'api': 'mtop.damai.wireless.project.getB2B2CAreaInfo',
'data': api_param,
}
response = requests.get('https://mtop.damai.cn/h5/mtop.damai.wireless.project.getb2b2careainfo/1.3/',
headers=headers, params=params, cookies=cookies)
if response.status_code == 200:
api_text = json.loads(response.text).get('data').get('result')
api_info = json.loads(api_text).get('seatQuYu')
api_address = api_info.get('resourcesPath')
seat_price_list = api_info.get('seatPriceList')
return api_address | 得到请求所有座位信息的api地址 |
181,020 | import re
import os
import json
import execjs
import pickle
import platform
import requests
from datetime import datetime
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
def get_sign_code(h5_token: str, time_stamp, api_param) -> str:
"""
返回请求选座信息接口必备的sign信息
:return:
"""
node = execjs.get()
with open('signcode.js', 'r', encoding='utf-8') as f:
js_file = f.read()
js_exec = node.compile(js_file)
param1 = '{}&{}&12574478&'.format(h5_token, time_stamp)
context = param1 + api_param
sign_code = js_exec.call('calcaulate', context)
return sign_code
The provided code snippet includes necessary dependencies for implementing the `get_valuable_seat_id` function. Write a Python function `def get_valuable_seat_id(cookies, project_id, perform_id, city_id, stand_id)` to solve the following problem:
获取可用的座位信息
Here is the function:
def get_valuable_seat_id(cookies, project_id, perform_id, city_id, stand_id):
""" 获取可用的座位信息 """
h5_token = cookies.get('_m_h5_tk').split('_')[0]
time_stamp = int(datetime.now().timestamp() * 1000)
api_param = json.dumps({"cityId": city_id,
"pfId": 2147483647 ^ int(perform_id),
"standIds": stand_id,
"channel": 100100010001,
"projectId": project_id,
"lessFirst": True,
"dmChannel": "pc@damai_pc"}).replace(' ', '')
sign_code = get_sign_code(h5_token, time_stamp, api_param)
headers = {
'authority': 'mtop.damai.cn',
'accept': 'application/json',
'accept-language': 'zh,en;q=0.9,en-US;q=0.8,zh-CN;q=0.7',
'content-type': 'application/x-www-form-urlencoded',
'origin': 'https://seatsvc.damai.cn',
'referer': 'https://seatsvc.damai.cn/',
'sec-ch-ua': '" Not A;Brand";v="99", "Chromium";v="100", "Google Chrome";v="100"',
'sec-ch-ua-mobile': '?0',
'sec-ch-ua-platform': '"macOS"',
'sec-fetch-dest': 'empty',
'sec-fetch-mode': 'cors',
'sec-fetch-site': 'same-site',
'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/100.0.4896.127 Safari/537.36',
}
params = {
'jsv': '2.6.0',
'appKey': '12574478',
't': time_stamp,
'sign': sign_code,
'type': 'originaljson',
'dataType': 'json',
'v': '1.0',
'H5Request': 'true',
'AntiCreep': 'true',
'AntiFlood': 'true',
'api': 'mtop.damai.wireless.seat.queryseatstatus',
'data': api_param
}
response = requests.get('https://mtop.damai.cn/h5/mtop.damai.wireless.seat.queryseatstatus/1.0/', params=params,
cookies=cookies, headers=headers)
if response.status_code == 200:
seat_data = json.loads(response.text)
seat_data = seat_data.get('data')
return seat_data | 获取可用的座位信息 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.