id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
14,956 | import argparse
import csv
import os.path as osp
from multiprocessing import Pool
import mmcv
from mmengine.config import Config
from mmengine.fileio import FileClient, dump
def get_metas_from_csv_style_ann_file(ann_file):
    """Collect image metadata from a CSV-style annotation file.

    The first row is treated as a header and skipped.  Each remaining row's
    first column is an image id; consecutive rows that refer to the same
    image are collapsed into a single entry.

    Args:
        ann_file (str): Path to the CSV annotation file.

    Returns:
        list[dict]: One ``dict(filename=...)`` per distinct image.
    """
    metas = []
    previous = None
    with open(ann_file, 'r') as fp:
        for row_idx, row in enumerate(csv.reader(fp)):
            if row_idx == 0:  # header row
                continue
            name = f'{row[0]}.jpg'
            if name != previous:
                metas.append(dict(filename=name))
                previous = name
    return metas
14,957 | import argparse
import csv
import os.path as osp
from multiprocessing import Pool
import mmcv
from mmengine.config import Config
from mmengine.fileio import FileClient, dump
def get_metas_from_txt_style_ann_file(ann_file):
    """Collect image metadata from a txt-style annotation file.

    The file is organised in records of the form::

        <filename>
        <image info line>
        <number of boxes N>
        ... N box lines ...

    Args:
        ann_file (str): Path to the txt annotation file.

    Returns:
        list[dict]: One ``dict(filename=...)`` per record.
    """
    with open(ann_file) as fp:
        content = fp.readlines()
    metas = []
    cursor = 0
    total = len(content)
    while cursor < total:
        metas.append(dict(filename=content[cursor].rstrip()))
        # Jump over the info line, the box-count line and the N box lines.
        cursor += int(content[cursor + 2]) + 3
    return metas
14,958 | import argparse
import csv
import os.path as osp
from multiprocessing import Pool
import mmcv
from mmengine.config import Config
from mmengine.fileio import FileClient, dump
def get_image_metas(data_info, img_prefix):
    """Load one image from disk and return its filename and original shape.

    Args:
        data_info (dict): Must contain a ``filename`` key.
        img_prefix (str | None): Optional directory prepended to the
            filename.

    Returns:
        dict: ``dict(filename=..., ori_shape=...)`` where ``ori_shape`` is
        the decoded image's ``(h, w, c)`` shape.

    Raises:
        NotImplementedError: If ``data_info`` carries no ``filename`` key.
    """
    file_client = FileClient(backend='disk')
    filename = data_info.get('filename', None)
    if filename is None:
        raise NotImplementedError('Missing `filename` in data_info')
    if img_prefix is not None:
        filename = osp.join(img_prefix, filename)
    # Read raw bytes through the disk backend, then decode to a color image.
    img_bytes = file_client.get(filename)
    decoded = mmcv.imfrombytes(img_bytes, flag='color')
    return dict(filename=filename, ori_shape=decoded.shape)
14,959 | import argparse
import os
from mmengine import Config, DictAction
from mmdet.utils import replace_cfg_vals, update_data_root
def parse_args():
    """Parse command-line arguments for printing a complete config.

    Returns:
        argparse.Namespace: Parsed arguments.
    """
    arg_parser = argparse.ArgumentParser(description='Print the whole config')
    arg_parser.add_argument('config', help='config file path')
    arg_parser.add_argument(
        '--save-path',
        default=None,
        help='save path of whole config, suffixed with .py, .json or .yml')
    # `DictAction` parses repeated key=value pairs into a dict.
    arg_parser.add_argument(
        '--cfg-options',
        nargs='+',
        action=DictAction,
        help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into config file. If the value to '
        'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
        'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
        'Note that the quotation marks are necessary and that no white space '
        'is allowed.')
    return arg_parser.parse_args()
14,960 | import argparse
import os.path as osp
import numpy as np
from mmengine.fileio import dump, load
from mmengine.utils import mkdir_or_exist, track_parallel_progress
def parse_args():
    """Parse command-line options for building semi-supervised COCO splits.

    Returns:
        argparse.Namespace: Parsed arguments with data root, output
        directory, labeled percentages and fold count.
    """
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument(
        '--data-root',
        type=str,
        help='The data root of coco dataset.',
        default='./data/coco/')
    arg_parser.add_argument(
        '--out-dir',
        type=str,
        help='The output directory of coco semi-supervised annotations.',
        default='./data/coco/semi_anns/')
    arg_parser.add_argument(
        '--labeled-percent',
        type=float,
        nargs='+',
        help='The percentage of labeled data in the training set.',
        default=[1, 2, 5, 10])
    arg_parser.add_argument(
        '--fold',
        type=int,
        help='K-fold cross validation for semi-supervised object detection.',
        default=5)
    return arg_parser.parse_args()
14,961 | import argparse
import os.path as osp
import numpy as np
from mmengine.fileio import dump, load
from mmengine.utils import mkdir_or_exist, track_parallel_progress
def split_coco(data_root, out_dir, percent, fold):
def multi_wrapper(args):
    """Unpack one argument tuple and forward it to ``split_coco``.

    Needed because ``multiprocessing.Pool`` map helpers pass exactly one
    positional argument per task.
    """
    data_root, out_dir, percent, fold = args
    return split_coco(data_root, out_dir, percent, fold)
14,962 | from argparse import ArgumentParser, Namespace
from pathlib import Path
from tempfile import TemporaryDirectory
from mmengine.config import Config
from mmengine.utils import mkdir_or_exist
The provided code snippet includes necessary dependencies for implementing the `mmdet2torchserve` function. Write a Python function `def mmdet2torchserve( config_file: str, checkpoint_file: str, output_folder: str, model_name: str, model_version: str = '1.0', force: bool = False, )` to solve the following problem:
Converts MMDetection model (config + checkpoint) to TorchServe `.mar`. Args: config_file: In MMDetection config format. The contents vary for each task repository. checkpoint_file: In MMDetection checkpoint format. The contents vary for each task repository. output_folder: Folder where `{model_name}.mar` will be created. The file created will be in TorchServe archive format. model_name: If not None, used for naming the `{model_name}.mar` file that will be created under `output_folder`. If None, `{Path(checkpoint_file).stem}` will be used. model_version: Model's version. force: If True, if there is an existing `{model_name}.mar` file under `output_folder` it will be overwritten.
Here is the function:
def mmdet2torchserve(
    config_file: str,
    checkpoint_file: str,
    output_folder: str,
    model_name: str,
    model_version: str = '1.0',
    force: bool = False,
):
    """Package an MMDetection model as a TorchServe ``.mar`` archive.

    Args:
        config_file: Path to the MMDetection config file.
        checkpoint_file: Path to the MMDetection checkpoint file.
        output_folder: Directory in which ``{model_name}.mar`` is created.
        model_name: Name of the archive; falls back to the checkpoint
            file's stem when ``None``.
        model_version: Version string stored in the archive manifest.
        force: Overwrite an existing ``{model_name}.mar`` when ``True``.
    """
    mkdir_or_exist(output_folder)

    config = Config.fromfile(config_file)

    with TemporaryDirectory() as work_dir:
        # Archive a dumped copy of the (resolved) config, not the original.
        config.dump(f'{work_dir}/config.py')

        archiver_options = {
            'model_file': f'{work_dir}/config.py',
            'serialized_file': checkpoint_file,
            'handler': f'{Path(__file__).parent}/mmdet_handler.py',
            'model_name': model_name or Path(checkpoint_file).stem,
            'version': model_version,
            'export_path': output_folder,
            'force': force,
            'requirements_file': None,
            'extra_files': None,
            'runtime': 'python',
            'archive_format': 'default',
        }
        args = Namespace(**archiver_options)
        # NOTE(review): `ModelExportUtils` and `package_model` come from
        # torchserve's `model_archiver` package; their import is not visible
        # in this snippet — confirm it exists at module level.
        manifest = ModelExportUtils.generate_manifest_json(args)
        package_model(args, manifest)
14,963 | from argparse import ArgumentParser, Namespace
from pathlib import Path
from tempfile import TemporaryDirectory
from mmengine.config import Config
from mmengine.utils import mkdir_or_exist
def parse_args():
    """Parse command-line options for the mmdet -> TorchServe converter.

    Returns:
        argparse.Namespace: Parsed arguments.
    """
    arg_parser = ArgumentParser(
        description='Convert MMDetection models to TorchServe `.mar` format.')
    arg_parser.add_argument('config', type=str, help='config file path')
    arg_parser.add_argument('checkpoint', type=str, help='checkpoint file path')
    arg_parser.add_argument(
        '--output-folder',
        type=str,
        required=True,
        help='Folder where `{model_name}.mar` will be created.')
    arg_parser.add_argument(
        '--model-name',
        type=str,
        default=None,
        help='If not None, used for naming the `{model_name}.mar`'
        'file that will be created under `output_folder`.'
        'If None, `{Path(checkpoint_file).stem}` will be used.')
    arg_parser.add_argument(
        '--model-version',
        type=str,
        default='1.0',
        help='Number used for versioning.')
    arg_parser.add_argument(
        '-f',
        '--force',
        action='store_true',
        help='overwrite the existing `{model_name}.mar`')
    return arg_parser.parse_args()
14,964 | import json
import multiprocessing
import os
import sys
from itertools import product
from math import ceil
import cv2
import numpy as np
class PatchGenerator(object):
def __init__(self, info, type='normal', data_dir='/home/liwenxi/panda/raw/PANDA/image_train',
save_img_path='/home/liwenxi/panda/raw/PANDA/patches/s_6000x6000', save_json_path='./new.json'):
def data_transfer(self):
def image(self):
def categorie(self):
def annotation(self):
def getcatid(self, label):
def getsegmentation(self):
def mask2polygons(self):
def data2coco(self):
def save_json(self):
def get_params(self, h, w, th, tw):
def random_crop(self, img, output_size):
def crop(self, img, i, j, th, tw):
def get_center(self, json_file, width, height, patch_size):
def normal_center(self, patch_w, patch_h, num_patch=16):
def slide_window(self, width, height, sizes, steps, img_rate_thr=0.6):
def worker1(train_info):
    """Pool worker: cut training-set patches in sliding-window ('sw') mode.

    Instantiating ``PatchGenerator`` triggers the whole crop-and-save
    pipeline as a side effect of its ``__init__``.
    """
    PatchGenerator(train_info, type='sw',
                   save_json_path='/home/liwenxi/panda/raw/PANDA/coco_json/train_6000x6000.json')
14,965 | import json
import multiprocessing
import os
import sys
from itertools import product
from math import ceil
import cv2
import numpy as np
class PatchGenerator(object):
    """Crop PANDA gigapixel images into patches and export a COCO-style JSON.

    Creating an instance runs the full pipeline as a side effect: every
    image listed in ``info`` is cropped into patches according to ``type``
    ('center': person-centred windows, 'normal': regular grid, 'sw':
    sliding window), person boxes are remapped into patch coordinates,
    the kept patches are written under ``save_img_path`` and a COCO
    annotation file is dumped to ``save_json_path``.
    """

    def __init__(self, info, type='normal', data_dir='/home/liwenxi/panda/raw/PANDA/image_train',
                 save_img_path='/home/liwenxi/panda/raw/PANDA/patches/s_6000x6000', save_json_path='./new.json'):
        # `info` maps image file names to PANDA annotation dicts
        # ('image size' + 'objects list') — assumed from the access
        # patterns below; TODO confirm against the caller.
        self.data_dir = data_dir
        self.type = type
        self.save_img_path = save_img_path
        self.info = info
        self.save_json_path = save_json_path
        self.images = []       # COCO `images` section
        self.categories = []   # COCO `categories` section
        self.annotations = []  # COCO `annotations` section
        self.label = []        # category names registered so far
        self.annID = 1         # running annotation id (1-based)
        self.num = 0           # number of patches kept so far
        self.ob = []
        if type == 'center':
            self.patch_size = [16]
        elif type == 'normal':
            self.patch_size = [4, 8, 16]
        if not os.path.exists(save_img_path):
            os.mkdir(save_img_path)
        # Kick off the whole conversion immediately.
        self.save_json()

    def data_transfer(self):
        """Crop every image into patches and collect COCO records."""
        for num, json_file in enumerate(self.info):
            sys.stdout.write('\r>> Converting image %d/%d' % (
                num + 1, len(self.info)))
            sys.stdout.flush()
            width = self.info[json_file]['image size']['width']
            height = self.info[json_file]['image size']['height']
            img = cv2.imread(os.path.join(self.data_dir, json_file))
            if self.type == 'center':
                center_list = self.get_center(json_file, width, height, self.patch_size)
            elif self.type == 'normal':
                patch_num = self.patch_size[0]
                self.patch_w = width // patch_num
                self.patch_h = height // patch_num
                center_list = self.normal_center(self.patch_w, self.patch_h, patch_num)
            elif self.type == 'sw':
                center_list = self.slide_window(width, height, [(6000, 6000)], [(5000, 5000)])
            for patch_id, center_lf_point in enumerate(center_list):
                x, y, patch_w, patch_h = center_lf_point
                self.patch_w = patch_w
                self.patch_h = patch_h
                patch = self.crop(img, y, x, patch_h, patch_w)
                self.file_name = os.path.basename(json_file).replace(
                    '.jpg', '_' + str(patch_id + 1).zfill(4) + '.jpg')
                patch_person_count = 0
                for obj in self.info[json_file]['objects list']:
                    if obj['category'] not in ['person']:
                        continue
                    self.supercategory = obj['category']
                    if self.supercategory not in self.label:
                        self.categories.append(self.categorie())
                        self.label.append(self.supercategory)
                    obj = obj['rects']['full body']
                    # PANDA boxes appear to be normalised coordinates;
                    # convert to absolute pixels.
                    x1 = float(obj['tl']['x'] * width)
                    y1 = float(obj['tl']['y'] * height)
                    w = float((obj['br']['x'] - obj['tl']['x']) * width)
                    h = float((obj['br']['y'] - obj['tl']['y']) * height)
                    # Shift into patch coordinates and clip to patch bounds.
                    box_x1 = (x1 - x)
                    box_x2 = box_x1 + w
                    box_y1 = (y1 - y)
                    box_y2 = box_y1 + h
                    box_x1, box_x2 = np.clip((box_x1, box_x2), 0, patch_w)
                    box_y1, box_y2 = np.clip((box_y1, box_y2), 0, patch_h)
                    # Discard boxes whose visible part is less than half of
                    # the original box area.
                    if (box_y2 - box_y1) * (box_x2 - box_x1) < w * h / 2:
                        continue
                    self.bbox = [box_x1, box_y1, box_x2 - box_x1, box_y2 - box_y1]  # COCO format [x, y, w, h]
                    self.area = (box_x2 - box_x1) * (box_y2 - box_y1)
                    self.annotations.append(self.annotation())
                    self.annID += 1
                    patch_person_count += 1
                # Only patches containing at least one person are kept and
                # saved to disk.
                if patch_person_count > 0:
                    self.images.append(self.image())
                    self.num += 1
                    # Border patches can be smaller than the nominal size;
                    # the resize normalises them before saving.
                    patch = cv2.resize(patch, dsize=(self.patch_w, self.patch_h))
                    cv2.imwrite(os.path.join(self.save_img_path, self.file_name), patch)
        sys.stdout.write('\n')
        sys.stdout.flush()

    def image(self):
        """Build the COCO `image` record for the patch being processed."""
        image = {}
        image['height'] = self.patch_h
        image['width'] = self.patch_w
        image['id'] = self.num + 1
        image['file_name'] = self.file_name
        return image

    def categorie(self):
        """Build a COCO `category` record for the current supercategory."""
        categorie = {}
        categorie['supercategory'] = self.supercategory
        categorie['id'] = len(self.label) + 1  # 0 is background
        categorie['name'] = self.supercategory
        return categorie

    def annotation(self):
        """Build a COCO `annotation` record for the current box."""
        annotation = {}
        annotation['segmentation'] = [list(map(float, self.getsegmentation()))]
        annotation['iscrowd'] = 0
        annotation['image_id'] = self.num + 1
        annotation['bbox'] = self.bbox
        annotation['area'] = self.area
        annotation['category_id'] = self.getcatid(self.supercategory)
        annotation['id'] = self.annID
        return annotation

    def getcatid(self, label):
        """Return the category id registered for ``label`` (-1 if unknown)."""
        for categorie in self.categories:
            if label == categorie['name']:
                return categorie['id']
        return -1

    def getsegmentation(self):
        """Placeholder segmentation (boxes only, no real masks)."""
        return [0]

    def mask2polygons(self):
        """Flatten mask contours into a polygon point list.

        NOTE(review): unused by the current pipeline; under OpenCV >= 4,
        ``findContours`` returns ``(contours, hierarchy)`` so
        ``contours[1]`` would be the hierarchy — verify before reuse.
        """
        contours = cv2.findContours(self.mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        bbox = []
        for cont in contours[1]:
            [bbox.append(i) for i in list(cont.flatten())]
        return bbox

    def data2coco(self):
        """Assemble the final COCO dictionary."""
        data_coco = {}
        data_coco['images'] = self.images
        data_coco['categories'] = self.categories
        data_coco['annotations'] = self.annotations
        return data_coco

    def save_json(self):
        """Run the conversion and dump the COCO dict to ``save_json_path``."""
        self.data_transfer()
        self.data_coco = self.data2coco()
        json.dump(self.data_coco, open(self.save_json_path, 'w'), indent=4)

    def get_params(self, h, w, th, tw):
        """Sample a random top-left corner for a ``th x tw`` crop inside an
        ``h x w`` image.

        Returns:
            tuple[int, int]: ``(i, j)`` row/column of the crop's top-left.
        """
        if w == tw and h == th:
            # BUG FIX: used to return ``0, 0, h, w`` (four values), which
            # broke the two-value unpacking in ``random_crop``.
            return 0, 0
        i = np.random.randint(0, h - th + 1, size=(1,)).item()
        j = np.random.randint(0, w - tw + 1, size=(1,)).item()
        return i, j

    def random_crop(self, img, output_size):
        """Randomly crop ``img`` to ``output_size``.

        Returns:
            tuple: ``(crop, x, y)`` — the cropped array plus the crop's
            top-left column and row in the source image.
        """
        h, w, _ = img.shape
        th, tw = output_size
        if h + 1 < th or w + 1 < tw:
            raise ValueError(
                "Required crop size {} is larger then input image size {}".format((th, tw), (h, w))
            )
        i, j = self.get_params(h, w, th, tw)
        target = self.crop(img, i, j, th, tw)
        return target, j, i

    def crop(self, img, i, j, th, tw):
        """Slice a ``th x tw`` window with top-left at row ``i``, col ``j``."""
        target = img[i:i + th, j:j + tw, :]
        return target

    def get_center(self, json_file, width, height, patch_size):
        """Return windows centred on each person, one set per grid size."""
        center_list = []
        for patch_num in patch_size:
            patch_w = width // patch_num
            patch_h = height // patch_num
            for obj in self.info[json_file]['objects list']:
                if obj['category'] != 'person':
                    continue
                self.supercategory = obj['category']
                if self.supercategory not in self.label:
                    self.categories.append(self.categorie())
                    self.label.append(self.supercategory)
                x1 = float(obj['rects']['full body']['tl']['x'] * width)
                y1 = float(obj['rects']['full body']['tl']['y'] * height)
                w = float((obj['rects']['full body']['br']['x'] - obj['rects']['full body']['tl']['x']) * width)
                h = float((obj['rects']['full body']['br']['y'] - obj['rects']['full body']['tl']['y']) * height)
                center_x = x1 + w // 2
                center_y = y1 + h // 2
                lt_x = int(center_x - patch_w // 2)
                lt_y = int(center_y - patch_h // 2)
                # Keep only windows fully inside the image.
                if 0 < lt_x < width - patch_w + 1 and 0 < lt_y < height - patch_h + 1:
                    center_list.append((lt_x, lt_y, patch_w, patch_h))
        return center_list

    def normal_center(self, patch_w, patch_h, num_patch=16):
        """Return top-left corners of a regular num_patch x num_patch grid."""
        center_list = []
        for i in range(num_patch):
            for j in range(num_patch):
                lt_x = i * patch_w
                lt_y = j * patch_h
                center_list.append((lt_x, lt_y, patch_w, patch_h))
        return center_list

    def slide_window(self, width, height, sizes, steps, img_rate_thr=0.6):
        """Slide windows in images and get window position.

        Args:
            width (int): The width of the image.
            height (int): The height of the image.
            sizes (list): List of window's sizes.
            steps (list): List of window's steps.
            img_rate_thr (float): Threshold of window area divided by image
                area.

        Returns:
            list[tuple]: ``(x, y, w, h)`` of each valid window.
        """
        assert 1 >= img_rate_thr >= 0, 'The `in_rate_thr` should lie in 0~1'
        windows = []
        # Sliding windows.
        for size, step in zip(sizes, steps):
            size_w, size_h = size
            step_w, step_h = step
            x_num = 1 if width <= size_w else ceil((width - size_w) / step_w + 1)
            x_start = [step_w * i for i in range(x_num)]
            if len(x_start) > 1 and x_start[-1] + size_w > width:
                # Snap the last window to the right edge.
                x_start[-1] = width - size_w
            y_num = 1 if height <= size_h else ceil((height - size_h) / step_h + 1)
            y_start = [step_h * i for i in range(y_num)]
            if len(y_start) > 1 and y_start[-1] + size_h > height:
                y_start[-1] = height - size_h
            start = np.array(list(product(x_start, y_start)), dtype=int)
            windows.append(np.concatenate([start, start + size], axis=1))
        windows = np.concatenate(windows, axis=0)
        # Calculate the rate of image part in each window.
        img_in_wins = windows.copy()
        img_in_wins[:, 0::2] = np.clip(img_in_wins[:, 0::2], 0, width)
        img_in_wins[:, 1::2] = np.clip(img_in_wins[:, 1::2], 0, height)
        img_areas = (img_in_wins[:, 2] - img_in_wins[:, 0]) * \
                    (img_in_wins[:, 3] - img_in_wins[:, 1])
        win_areas = (windows[:, 2] - windows[:, 0]) * \
                    (windows[:, 3] - windows[:, 1])
        img_rates = img_areas / win_areas
        if not (img_rates >= img_rate_thr).any():
            # Keep at least the best-covered window when all fall below
            # the threshold.
            img_rates[img_rates == img_rates.max()] = 1
        windows = windows[img_rates >= img_rate_thr]
        return [(int(box[0]), int(box[1]), int(box[2] - box[0]), int(box[3] - box[1])) for box in windows]
def worker2(val_info):
    """Pool worker: cut test-set patches in sliding-window ('sw') mode.

    Instantiating ``PatchGenerator`` triggers the whole crop-and-save
    pipeline as a side effect of its ``__init__``.
    """
    PatchGenerator(val_info, data_dir='/home/liwenxi/panda/raw/PANDA/image_test', type='sw', save_json_path='/home/liwenxi/panda/raw/PANDA/coco_json/test_s4.json')
14,966 | import warnings
from pathlib import Path
from typing import Optional, Sequence, Union
import numpy as np
import torch
import torch.nn as nn
from mmcv.ops import RoIPool
from mmcv.transforms import Compose
from mmengine.config import Config
from mmengine.runner import load_checkpoint
from mmdet.evaluation import get_classes
from mmdet.registry import MODELS
from mmdet.structures import DetDataSample, SampleList
from mmdet.utils import get_test_pipeline_cfg
The provided code snippet includes necessary dependencies for implementing the `init_detector` function. Write a Python function `def init_detector( config: Union[str, Path, Config], checkpoint: Optional[str] = None, palette: str = 'coco', device: str = 'cuda:0', cfg_options: Optional[dict] = None, ) -> nn.Module` to solve the following problem:
Initialize a detector from config file. Args: config (str, :obj:`Path`, or :obj:`mmengine.Config`): Config file path, :obj:`Path`, or the config object. checkpoint (str, optional): Checkpoint path. If left as None, the model will not load any weights. palette (str): Color palette used for visualization. If palette is stored in checkpoint, use checkpoint's palette first, otherwise use externally passed palette. Currently, supports 'coco', 'voc', 'citys' and 'random'. Defaults to coco. device (str): The device where the anchors will be put on. Defaults to cuda:0. cfg_options (dict, optional): Options to override some settings in the used config. Returns: nn.Module: The constructed detector.
Here is the function:
def init_detector(
    config: Union[str, Path, Config],
    checkpoint: Optional[str] = None,
    palette: str = 'coco',
    device: str = 'cuda:0',
    cfg_options: Optional[dict] = None,
) -> nn.Module:
    """Build a detector from a config and optionally load a checkpoint.

    Args:
        config (str, :obj:`Path`, or :obj:`mmengine.Config`): Config file
            path, :obj:`Path`, or an already-loaded config object.
        checkpoint (str, optional): Checkpoint path; no weights are loaded
            when ``None``.
        palette (str): Color palette used for visualization when the
            checkpoint itself does not carry one. Currently supports
            'coco', 'voc', 'citys' and 'random'. Defaults to coco.
        device (str): Device the model is moved to. Defaults to cuda:0.
        cfg_options (dict, optional): Overrides merged into the config.

    Returns:
        nn.Module: The constructed detector, in eval mode.
    """
    if isinstance(config, (str, Path)):
        config = Config.fromfile(config)
    elif not isinstance(config, Config):
        raise TypeError('config must be a filename or Config object, '
                        f'but got {type(config)}')

    if cfg_options is not None:
        config.merge_from_dict(cfg_options)
    elif 'init_cfg' in config.model.backbone:
        # Drop pretrained-weight init: weights come from the checkpoint.
        config.model.backbone.init_cfg = None
    config.model.train_cfg = None

    model = MODELS.build(config.model)
    # model = build_detector(config.model)

    if checkpoint is not None:
        ckpt = load_checkpoint(model, checkpoint, map_location='cpu')
        # Weights converted from elsewhere may not have meta fields.
        meta = ckpt.get('meta', {})
        # save the dataset_meta in the model for convenience
        if 'dataset_meta' in meta:
            # mmdet 3.x checkpoints store the full dataset meta.
            model.dataset_meta = meta['dataset_meta']
        elif 'CLASSES' in meta:
            # < mmdet 3.x checkpoints only store class names.
            model.dataset_meta = {
                'CLASSES': meta['CLASSES'],
                'PALETTE': palette
            }
        else:
            warnings.simplefilter('once')
            warnings.warn(
                'dataset_meta or class names are not saved in the '
                'checkpoint\'s meta data, use COCO classes by default.')
            model.dataset_meta = {
                'CLASSES': get_classes('coco'),
                'PALETTE': palette
            }

    model.cfg = config  # save the config in the model for convenience
    model.to(device)
    model.eval()
    return model
14,967 | import warnings
from pathlib import Path
from typing import Optional, Sequence, Union
import numpy as np
import torch
import torch.nn as nn
from mmcv.ops import RoIPool
from mmcv.transforms import Compose
from mmengine.config import Config
from mmengine.runner import load_checkpoint
from mmdet.evaluation import get_classes
from mmdet.registry import MODELS
from mmdet.structures import DetDataSample, SampleList
from mmdet.utils import get_test_pipeline_cfg
ImagesType = Union[str, np.ndarray, Sequence[str], Sequence[np.ndarray]]
The provided code snippet includes necessary dependencies for implementing the `inference_detector` function. Write a Python function `def inference_detector( model: nn.Module, imgs: ImagesType, timer=None, test_pipeline: Optional[Compose] = None ) -> Union[DetDataSample, SampleList]` to solve the following problem:
Inference image(s) with the detector. Args: model (nn.Module): The loaded detector. imgs (str, ndarray, Sequence[str/ndarray]): Either image files or loaded images. test_pipeline (:obj:`Compose`): Test pipeline. Returns: :obj:`DetDataSample` or list[:obj:`DetDataSample`]: If imgs is a list or tuple, the same length list type results will be returned, otherwise return the detection results directly.
Here is the function:
def inference_detector(
    model: nn.Module,
    imgs: ImagesType,
    timer=None,
    test_pipeline: Optional[Compose] = None
) -> Union[DetDataSample, SampleList]:
    """Inference image(s) with the detector.

    Args:
        model (nn.Module): The loaded detector (``model.cfg`` must be set).
        imgs (str, ndarray, Sequence[str/ndarray]): Image file path(s) or
            already-loaded image array(s).
        timer: Optional object with ``tic()``/``toc()``; when given, only
            the forward pass is timed.
        test_pipeline (:obj:`Compose`, optional): Test pipeline; built from
            ``model.cfg`` when ``None``.

    Returns:
        :obj:`DetDataSample` or list[:obj:`DetDataSample`]: A list of the
        same length when ``imgs`` is a list/tuple, otherwise the single
        detection result.
    """
    if isinstance(imgs, (list, tuple)):
        is_batch = True
    else:
        imgs = [imgs]
        is_batch = False

    cfg = model.cfg

    if test_pipeline is None:
        cfg = cfg.copy()
        test_pipeline = get_test_pipeline_cfg(cfg)
        if imgs and isinstance(imgs[0], np.ndarray):
            # Calling this method across libraries will result
            # in module unregistered error if not prefixed with mmdet.
            test_pipeline[0].type = 'mmdet.LoadImageFromNDArray'
        test_pipeline = Compose(test_pipeline)

    if model.data_preprocessor.device.type == 'cpu':
        for m in model.modules():
            assert not isinstance(
                m, RoIPool
            ), 'CPU inference with RoIPool is not supported currently.'

    inputs = []
    data_samples = []
    for img in imgs:
        # prepare data
        if isinstance(img, np.ndarray):
            # TODO: remove img_id.
            data_ = dict(img=img, img_id=0)
        else:
            # TODO: remove img_id.
            data_ = dict(img_path=img, img_id=0)
        # build the data pipeline
        data_ = test_pipeline(data_)
        inputs.append(data_['inputs'])
        data_samples.append(data_['data_samples'])

    # BUG FIX: the batch dict is built explicitly instead of mutating the
    # last per-image dict `data_`, which raised NameError for empty `imgs`
    # and leaked stale per-image keys into the batched input.
    batch = dict(inputs=inputs, data_samples=data_samples)

    # forward the model
    if timer is not None:
        timer.tic()
        with torch.no_grad():
            results = model.test_step(batch)
        timer.toc()
    else:
        with torch.no_grad():
            results = model.test_step(batch)

    if not is_batch:
        return results[0]
    return results
14,968 | import warnings
from pathlib import Path
from typing import Optional, Sequence, Union
import numpy as np
import torch
import torch.nn as nn
from mmcv.ops import RoIPool
from mmcv.transforms import Compose
from mmengine.config import Config
from mmengine.runner import load_checkpoint
from mmdet.evaluation import get_classes
from mmdet.registry import MODELS
from mmdet.structures import DetDataSample, SampleList
from mmdet.utils import get_test_pipeline_cfg
The provided code snippet includes necessary dependencies for implementing the `async_inference_detector` function. Write a Python function `async def async_inference_detector(model, imgs)` to solve the following problem:
Async inference image(s) with the detector. Args: model (nn.Module): The loaded detector. img (str | ndarray): Either image files or loaded images. Returns: Awaitable detection results.
Here is the function:
async def async_inference_detector(model, imgs):
    """Async inference image(s) with the detector.

    Args:
        model (nn.Module): The loaded detector.
        imgs (str | ndarray | list): Either image files or loaded images.

    Returns:
        Awaitable detection results.
    """
    if not isinstance(imgs, (list, tuple)):
        imgs = [imgs]

    cfg = model.cfg

    if isinstance(imgs[0], np.ndarray):
        cfg = cfg.copy()
        # set loading pipeline type
        cfg.data.test.pipeline[0].type = 'LoadImageFromNDArray'

    # cfg.data.test.pipeline = replace_ImageToTensor(cfg.data.test.pipeline)
    test_pipeline = Compose(cfg.data.test.pipeline)

    datas = []
    for img in imgs:
        # prepare data
        if isinstance(img, np.ndarray):
            # directly add img
            data = dict(img=img)
        else:
            # add information into dict
            data = dict(img_info=dict(filename=img), img_prefix=None)
        # build the data pipeline
        data = test_pipeline(data)
        datas.append(data)

    for m in model.modules():
        assert not isinstance(
            m,
            RoIPool), 'CPU inference with RoIPool is not supported currently.'

    # We don't restore `torch.is_grad_enabled()` value during concurrent
    # inference since execution can overlap
    torch.set_grad_enabled(False)
    # NOTE(review): `datas` is built but never used below — only the last
    # loop item `data` is forwarded. Upstream variants collate `datas` into
    # a batch first; confirm whether a collate step was lost here.
    results = await model.aforward_test(data, rescale=True)
    return results
14,969 | import os
import torch
import torch.distributed as dist
def load_checkpoint(config, model, optimizer, lr_scheduler, logger):
    """Resume training state from ``config.MODEL.RESUME``.

    Model weights are always restored (non-strictly); optimizer, scheduler
    and epoch counters are restored only outside EVAL mode and when present
    in the checkpoint.

    Args:
        config: Yacs-style config; ``MODEL.RESUME`` is a local path or an
            https URL.
        model: Module whose weights are restored.
        optimizer / lr_scheduler: Objects whose state may be restored.
        logger: Logger used for progress messages.

    Returns:
        float: ``max_accuracy`` stored in the checkpoint when resuming
        training state, otherwise 0.0.
    """
    logger.info(f"==============> Resuming form {config.MODEL.RESUME}....................")
    resume_path = config.MODEL.RESUME
    if resume_path.startswith('https'):
        checkpoint = torch.hub.load_state_dict_from_url(
            resume_path, map_location='cpu', check_hash=True)
    else:
        checkpoint = torch.load(resume_path, map_location='cpu')

    # Non-strict load tolerates missing/unexpected keys.
    logger.info(model.load_state_dict(checkpoint['model'], strict=False))

    max_accuracy = 0.0
    resuming_training = (not config.EVAL_MODE and 'optimizer' in checkpoint
                         and 'lr_scheduler' in checkpoint
                         and 'epoch' in checkpoint)
    if resuming_training:
        optimizer.load_state_dict(checkpoint['optimizer'])
        lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
        config.defrost()
        config.TRAIN.START_EPOCH = checkpoint['epoch'] + 1
        config.freeze()
        if 'amp' in checkpoint and config.AMP_OPT_LEVEL != "O0" and checkpoint['config'].AMP_OPT_LEVEL != "O0":
            # `amp` is NVIDIA apex — presumably imported elsewhere in this
            # module; verify before enabling mixed precision.
            amp.load_state_dict(checkpoint['amp'])
        logger.info(f"=> loaded successfully '{config.MODEL.RESUME}' (epoch {checkpoint['epoch']})")
        if 'max_accuracy' in checkpoint:
            max_accuracy = checkpoint['max_accuracy']

    del checkpoint
    torch.cuda.empty_cache()
    return max_accuracy
14,970 | import os
import torch
import torch.distributed as dist
def load_pretrained(config, model, logger):
    """Load ``config.MODEL.PRETRAINED`` weights into ``model`` for fine-tuning.

    Resolution-dependent buffers (relative position index/coords tables,
    attention masks) are dropped and re-initialised by the model; relative
    position bias tables and absolute position embeddings whose spatial
    resolution differs are bicubically interpolated; the classifier head is
    remapped (22K -> 1K) or zero-initialised when its size does not match.

    Args:
        config: Yacs-style config; ``MODEL.PRETRAINED`` is a local path.
        model: Target module; weights are loaded non-strictly.
        logger: Logger for progress/warning messages.
    """
    logger.info(f"==============> Loading weight {config.MODEL.PRETRAINED} for fine-tuning......")
    checkpoint = torch.load(config.MODEL.PRETRAINED, map_location='cpu')
    state_dict = checkpoint['model']

    # delete relative_position_index since we always re-init it
    relative_position_index_keys = [k for k in state_dict.keys() if "relative_position_index" in k]
    for k in relative_position_index_keys:
        del state_dict[k]

    # delete relative_coords_table since we always re-init it
    relative_position_index_keys = [k for k in state_dict.keys() if "relative_coords_table" in k]
    for k in relative_position_index_keys:
        del state_dict[k]

    # delete attn_mask since we always re-init it
    attn_mask_keys = [k for k in state_dict.keys() if "attn_mask" in k]
    for k in attn_mask_keys:
        del state_dict[k]

    # bicubic interpolate relative_position_bias_table if not match
    relative_position_bias_table_keys = [k for k in state_dict.keys() if "relative_position_bias_table" in k]
    for k in relative_position_bias_table_keys:
        relative_position_bias_table_pretrained = state_dict[k]
        relative_position_bias_table_current = model.state_dict()[k]
        L1, nH1 = relative_position_bias_table_pretrained.size()
        L2, nH2 = relative_position_bias_table_current.size()
        if nH1 != nH2:
            logger.warning(f"Error in loading {k}, passing......")
        else:
            if L1 != L2:
                # Table is (window*window, heads); treat the first dim as a
                # square spatial grid and resize it.
                S1 = int(L1 ** 0.5)
                S2 = int(L2 ** 0.5)
                relative_position_bias_table_pretrained_resized = torch.nn.functional.interpolate(
                    relative_position_bias_table_pretrained.permute(1, 0).view(1, nH1, S1, S1), size=(S2, S2),
                    mode='bicubic')
                state_dict[k] = relative_position_bias_table_pretrained_resized.view(nH2, L2).permute(1, 0)

    # bicubic interpolate absolute_pos_embed if not match
    absolute_pos_embed_keys = [k for k in state_dict.keys() if "absolute_pos_embed" in k]
    for k in absolute_pos_embed_keys:
        # dpe
        absolute_pos_embed_pretrained = state_dict[k]
        absolute_pos_embed_current = model.state_dict()[k]
        _, L1, C1 = absolute_pos_embed_pretrained.size()
        _, L2, C2 = absolute_pos_embed_current.size()
        # BUG FIX: this previously compared ``C1 != C1`` (always false), so
        # a channel mismatch was never detected.
        if C1 != C2:
            logger.warning(f"Error in loading {k}, passing......")
        else:
            if L1 != L2:
                S1 = int(L1 ** 0.5)
                S2 = int(L2 ** 0.5)
                absolute_pos_embed_pretrained = absolute_pos_embed_pretrained.reshape(-1, S1, S1, C1)
                absolute_pos_embed_pretrained = absolute_pos_embed_pretrained.permute(0, 3, 1, 2)
                absolute_pos_embed_pretrained_resized = torch.nn.functional.interpolate(
                    absolute_pos_embed_pretrained, size=(S2, S2), mode='bicubic')
                absolute_pos_embed_pretrained_resized = absolute_pos_embed_pretrained_resized.permute(0, 2, 3, 1)
                absolute_pos_embed_pretrained_resized = absolute_pos_embed_pretrained_resized.flatten(1, 2)
                state_dict[k] = absolute_pos_embed_pretrained_resized

    # check classifier, if not match, then re-init classifier to zero
    head_bias_pretrained = state_dict['head.bias']
    Nc1 = head_bias_pretrained.shape[0]
    Nc2 = model.head.bias.shape[0]
    if (Nc1 != Nc2):
        if Nc1 == 21841 and Nc2 == 1000:
            logger.info("loading ImageNet-22K weight to ImageNet-1K ......")
            map22kto1k_path = f'data/map22kto1k.txt'
            with open(map22kto1k_path) as f:
                map22kto1k = f.readlines()
            map22kto1k = [int(id22k.strip()) for id22k in map22kto1k]
            state_dict['head.weight'] = state_dict['head.weight'][map22kto1k, :]
            state_dict['head.bias'] = state_dict['head.bias'][map22kto1k]
        else:
            # Incompatible head: drop the pretrained head and start from a
            # zero-initialised classifier.
            torch.nn.init.constant_(model.head.bias, 0.)
            torch.nn.init.constant_(model.head.weight, 0.)
            del state_dict['head.weight']
            del state_dict['head.bias']
            logger.warning(f"Error in loading classifier head, re-init classifier head to 0")

    msg = model.load_state_dict(state_dict, strict=False)
    logger.warning(msg)

    logger.info(f"=> loaded successfully '{config.MODEL.PRETRAINED}'")

    del checkpoint
    torch.cuda.empty_cache()
14,971 | import os
import torch
import torch.distributed as dist
def save_checkpoint(config, epoch, model, max_accuracy, optimizer, lr_scheduler, logger):
    """Serialize the full training state to ``<OUTPUT>/ckpt_epoch_<epoch>.pth``.

    Saved entries: model/optimizer/lr_scheduler state dicts, best accuracy so
    far, the epoch number, and the config itself (plus Apex AMP state when
    mixed precision is enabled).
    """
    state = dict(
        model=model.state_dict(),
        optimizer=optimizer.state_dict(),
        lr_scheduler=lr_scheduler.state_dict(),
        max_accuracy=max_accuracy,
        epoch=epoch,
        config=config,
    )
    # Apex AMP keeps its own loss-scaling state; persist it unless AMP is off.
    if config.AMP_OPT_LEVEL != "O0":
        state['amp'] = amp.state_dict()
    target = os.path.join(config.OUTPUT, f'ckpt_epoch_{epoch}.pth')
    logger.info(f"{target} saving......")
    torch.save(state, target)
    logger.info(f"{target} saved !!!")
14,972 | import os
import torch
import torch.distributed as dist
def auto_resume_helper(output_dir):
    """Return the most recently modified ``*.pth`` checkpoint in ``output_dir``.

    Used to transparently resume training: if the output folder contains any
    checkpoint files, the newest one (by modification time) is returned;
    otherwise ``None``.

    Args:
        output_dir (str): folder where checkpoints are written.

    Returns:
        str or None: path of the latest checkpoint, or None when none exists.
    """
    # Robustness: on a fresh run the output folder may not exist yet;
    # os.listdir would raise FileNotFoundError.
    if not os.path.isdir(output_dir):
        return None
    checkpoints = os.listdir(output_dir)
    checkpoints = [ckpt for ckpt in checkpoints if ckpt.endswith('pth')]
    print(f"All checkpoints founded in {output_dir}: {checkpoints}")
    if len(checkpoints) > 0:
        # Newest-by-mtime so an interrupted run resumes from the last save.
        latest_checkpoint = max([os.path.join(output_dir, d) for d in checkpoints], key=os.path.getmtime)
        print(f"The latest checkpoint founded: {latest_checkpoint}")
        resume_file = latest_checkpoint
    else:
        resume_file = None
    return resume_file
14,973 | import os
import time
import random
import argparse
import datetime
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.distributed as dist
from timm.loss import LabelSmoothingCrossEntropy, SoftTargetCrossEntropy
from timm.utils import accuracy, AverageMeter
from config import get_config
from models import build_model
from data import build_loader
from lr_scheduler import build_scheduler
from optimizer import build_optimizer
from logger import create_logger
from utils import load_checkpoint, load_pretrained, save_checkpoint, get_grad_norm, auto_resume_helper, reduce_tensor
def get_config(args):
    """Get a yacs CfgNode object with default values."""
    # Work on a clone of the module-level defaults so repeated calls (the
    # "local variable" use pattern) never mutate the shared template.
    cfg = _C.clone()
    update_config(cfg, args)
    return cfg
def parse_option():
    """Build the CLI parser, parse known options, and derive the run config.

    Returns:
        tuple: (parsed argparse Namespace, frozen yacs config).
    """
    arg_parser = argparse.ArgumentParser('Swin Transformer training and evaluation script', add_help=False)
    arg_parser.add_argument('--cfg', type=str, required=True, metavar="FILE", help='path to config file', )
    arg_parser.add_argument(
        "--opts",
        help="Modify config options by adding 'KEY VALUE' pairs. ",
        default=None,
        nargs='+',
    )
    # easy config modification
    arg_parser.add_argument('--batch-size', type=int, help="batch size for single GPU")
    arg_parser.add_argument('--data-path', type=str, help='path to dataset')
    arg_parser.add_argument('--zip', action='store_true', help='use zipped dataset instead of folder dataset')
    arg_parser.add_argument('--cache-mode', type=str, default='part', choices=['no', 'full', 'part'],
                            help='no: no cache, '
                                 'full: cache all data, '
                                 'part: sharding the dataset into nonoverlapping pieces and only cache one piece')
    arg_parser.add_argument('--pretrained',
                            help='pretrained weight from checkpoint, could be imagenet22k pretrained weight')
    arg_parser.add_argument('--resume', help='resume from checkpoint')
    arg_parser.add_argument('--accumulation-steps', type=int, help="gradient accumulation steps")
    arg_parser.add_argument('--use-checkpoint', action='store_true',
                            help="whether to use gradient checkpointing to save memory")
    arg_parser.add_argument('--amp-opt-level', type=str, default='O1', choices=['O0', 'O1', 'O2'],
                            help='mixed precision opt level, if O0, no amp is used')
    arg_parser.add_argument('--output', default='output', type=str, metavar='PATH',
                            help='root of output folder, the full path is <output>/<model_name>/<tag> (default: output)')
    arg_parser.add_argument('--tag', help='tag of experiment')
    arg_parser.add_argument('--eval', action='store_true', help='Perform evaluation only')
    arg_parser.add_argument('--throughput', action='store_true', help='Test throughput only')
    # distributed training
    arg_parser.add_argument("--local_rank", type=int, required=True, help='local rank for DistributedDataParallel')
    # parse_known_args: tolerate extra launcher-injected arguments.
    args, unparsed = arg_parser.parse_known_args()
    return args, get_config(args)
14,974 | import os
import time
import random
import argparse
import datetime
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.distributed as dist
from timm.loss import LabelSmoothingCrossEntropy, SoftTargetCrossEntropy
from timm.utils import accuracy, AverageMeter
from config import get_config
from models import build_model
from data import build_loader
from lr_scheduler import build_scheduler
from optimizer import build_optimizer
from logger import create_logger
from utils import load_checkpoint, load_pretrained, save_checkpoint, get_grad_norm, auto_resume_helper, reduce_tensor
def get_grad_norm(parameters, norm_type=2):
    """Return the total ``norm_type``-norm over all gradients in ``parameters``.

    Parameters without a gradient are skipped; a lone tensor is accepted in
    place of an iterable. Returns 0.0 when no gradients are present.
    """
    if isinstance(parameters, torch.Tensor):
        parameters = [parameters]
    norm_type = float(norm_type)
    grads = [p.grad.data for p in parameters if p.grad is not None]
    # Combine per-tensor norms as (sum_i ||g_i||_p^p) ** (1/p).
    accumulated = sum(g.norm(norm_type).item() ** norm_type for g in grads)
    return accumulated ** (1. / norm_type)
def train_one_epoch(config, model, criterion, data_loader, optimizer, epoch, mixup_fn, lr_scheduler):
    """Train ``model`` for one epoch.

    Handles Mixup/CutMix, optional Apex AMP loss scaling, gradient clipping
    and gradient accumulation; logs progress every ``config.PRINT_FREQ``
    iterations. Relies on the module-level ``logger`` (and ``amp`` when
    ``config.AMP_OPT_LEVEL`` != "O0").
    """
    model.train()
    optimizer.zero_grad()
    num_steps = len(data_loader)
    batch_time = AverageMeter()
    loss_meter = AverageMeter()
    norm_meter = AverageMeter()
    start = time.time()
    end = time.time()
    for idx, (samples, targets) in enumerate(data_loader):
        samples = samples.cuda(non_blocking=True)
        targets = targets.cuda(non_blocking=True)
        if mixup_fn is not None:
            samples, targets = mixup_fn(samples, targets)
        # The model returns a pair; the second element is discarded here.
        outputs, _ = model(samples)
        if config.TRAIN.ACCUMULATION_STEPS > 1:
            # Gradient accumulation: scale the loss so summed gradients match
            # one large-batch step, and only step the optimizer every
            # ACCUMULATION_STEPS iterations.
            loss = criterion(outputs, targets)
            loss = loss / config.TRAIN.ACCUMULATION_STEPS
            if config.AMP_OPT_LEVEL != "O0":
                # Apex AMP: backprop through the scaled loss, then clip or
                # measure the norm on the fp32 master parameters.
                with amp.scale_loss(loss, optimizer) as scaled_loss:
                    scaled_loss.backward()
                if config.TRAIN.CLIP_GRAD:
                    grad_norm = torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), config.TRAIN.CLIP_GRAD)
                else:
                    grad_norm = get_grad_norm(amp.master_params(optimizer))
            else:
                loss.backward()
                if config.TRAIN.CLIP_GRAD:
                    grad_norm = torch.nn.utils.clip_grad_norm_(model.parameters(), config.TRAIN.CLIP_GRAD)
                else:
                    grad_norm = get_grad_norm(model.parameters())
            if (idx + 1) % config.TRAIN.ACCUMULATION_STEPS == 0:
                optimizer.step()
                optimizer.zero_grad()
                # The scheduler is driven by the global iteration counter.
                lr_scheduler.step_update(epoch * num_steps + idx)
        else:
            # No accumulation: one optimizer step per batch.
            loss = criterion(outputs, targets)
            optimizer.zero_grad()
            if config.AMP_OPT_LEVEL != "O0":
                with amp.scale_loss(loss, optimizer) as scaled_loss:
                    scaled_loss.backward()
                if config.TRAIN.CLIP_GRAD:
                    grad_norm = torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), config.TRAIN.CLIP_GRAD)
                else:
                    grad_norm = get_grad_norm(amp.master_params(optimizer))
            else:
                loss.backward()
                if config.TRAIN.CLIP_GRAD:
                    grad_norm = torch.nn.utils.clip_grad_norm_(model.parameters(), config.TRAIN.CLIP_GRAD)
                else:
                    grad_norm = get_grad_norm(model.parameters())
            optimizer.step()
            lr_scheduler.step_update(epoch * num_steps + idx)
        # Wait for the GPU so the timing below measures real work.
        torch.cuda.synchronize()
        loss_meter.update(loss.item(), targets.size(0))
        norm_meter.update(grad_norm)
        batch_time.update(time.time() - end)
        end = time.time()
        if idx % config.PRINT_FREQ == 0:
            lr = optimizer.param_groups[0]['lr']
            memory_used = torch.cuda.max_memory_allocated() / (1024.0 * 1024.0)
            etas = batch_time.avg * (num_steps - idx)
            logger.info(
                f'Train: [{epoch}/{config.TRAIN.EPOCHS}][{idx}/{num_steps}]\t'
                f'eta {datetime.timedelta(seconds=int(etas))} lr {lr:.6f}\t'
                f'time {batch_time.val:.4f} ({batch_time.avg:.4f})\t'
                f'loss {loss_meter.val:.4f} ({loss_meter.avg:.4f})\t'
                f'grad_norm {norm_meter.val:.4f} ({norm_meter.avg:.4f})\t'
                f'mem {memory_used:.0f}MB')
    epoch_time = time.time() - start
    logger.info(f"EPOCH {epoch} training takes {datetime.timedelta(seconds=int(epoch_time))}")
14,975 | import os
import time
import random
import argparse
import datetime
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.distributed as dist
from timm.loss import LabelSmoothingCrossEntropy, SoftTargetCrossEntropy
from timm.utils import accuracy, AverageMeter
from config import get_config
from models import build_model
from data import build_loader
from lr_scheduler import build_scheduler
from optimizer import build_optimizer
from logger import create_logger
from utils import load_checkpoint, load_pretrained, save_checkpoint, get_grad_norm, auto_resume_helper, reduce_tensor
def reduce_tensor(tensor):
def validate(config, data_loader, model):
    """Evaluate ``model`` on ``data_loader``.

    Computes cross-entropy loss and top-1 accuracy per batch; metrics pass
    through ``reduce_tensor`` (presumably a cross-rank average — confirm in
    utils). Relies on the module-level ``logger``.

    Returns:
        tuple: (average top-1 accuracy, average loss).
    """
    criterion = torch.nn.CrossEntropyLoss()
    model.eval()
    batch_time = AverageMeter()
    loss_meter = AverageMeter()
    acc1_meter = AverageMeter()
    # acc5_meter = AverageMeter()
    end = time.time()
    for idx, (images, target) in enumerate(data_loader):
        images = images.cuda(non_blocking=True)
        target = target.cuda(non_blocking=True)
        # compute output
        # The model returns a pair; the second element is unused here.
        output, _ = model(images)
        # measure accuracy and record loss
        loss = criterion(output, target)
        # acc1, acc5 = accuracy(output, target, topk=(1, 5))
        acc1, = accuracy(output, target, topk=(1, ))
        acc1 = reduce_tensor(acc1)
        # acc5 = reduce_tensor(acc5)
        loss = reduce_tensor(loss)
        loss_meter.update(loss.item(), target.size(0))
        acc1_meter.update(acc1.item(), target.size(0))
        # acc5_meter.update(acc5.item(), target.size(0))
        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()
        if idx % config.PRINT_FREQ == 0:
            memory_used = torch.cuda.max_memory_allocated() / (1024.0 * 1024.0)
            logger.info(
                f'Test: [{idx}/{len(data_loader)}]\t'
                f'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                f'Loss {loss_meter.val:.4f} ({loss_meter.avg:.4f})\t'
                f'Acc@1 {acc1_meter.val:.3f} ({acc1_meter.avg:.3f})\t'
                # f'Acc@5 {acc5_meter.val:.3f} ({acc5_meter.avg:.3f})\t'
                f'Mem {memory_used:.0f}MB')
    # logger.info(f' * Acc@1 {acc1_meter.avg:.3f} Acc@5 {acc5_meter.avg:.3f}')
    # return acc1_meter.avg, acc5_meter.avg, loss_meter.avg
    logger.info(f' * Acc@1 {acc1_meter.avg:.3f}')
    return acc1_meter.avg, loss_meter.avg
14,976 | import os
import time
import random
import argparse
import datetime
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.distributed as dist
from timm.loss import LabelSmoothingCrossEntropy, SoftTargetCrossEntropy
from timm.utils import accuracy, AverageMeter
from config import get_config
from models import build_model
from data import build_loader
from lr_scheduler import build_scheduler
from optimizer import build_optimizer
from logger import create_logger
from utils import load_checkpoint, load_pretrained, save_checkpoint, get_grad_norm, auto_resume_helper, reduce_tensor
def throughput(data_loader, model, logger):
    """Benchmark forward-pass throughput on a single batch, then return."""
    model.eval()
    for _, (images, _) in enumerate(data_loader):
        images = images.cuda(non_blocking=True)
        batch_size = images.shape[0]
        # Warm up kernels and caches before timing.
        for _ in range(50):
            model(images)
        torch.cuda.synchronize()
        logger.info(f"throughput averaged with 30 times")
        tic1 = time.time()
        for _ in range(30):
            model(images)
        # Synchronize so the measured interval covers all queued GPU work.
        torch.cuda.synchronize()
        tic2 = time.time()
        logger.info(f"batch_size {batch_size} throughput {30 * batch_size / (tic2 - tic1)}")
        # Only the first batch is measured.
        return
14,977 | import glob
import os
import shutil
import time
import random
import argparse
import datetime
import PIL.Image
import cv2
import matplotlib.pyplot as plt
import matplotlib.cm as CM
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import tqdm
from timm.loss import LabelSmoothingCrossEntropy, SoftTargetCrossEntropy
from timm.utils import accuracy, AverageMeter
from config import get_config
from models import build_model
from data import build_loader, build_transform
from lr_scheduler import build_scheduler
from optimizer import build_optimizer
from logger import create_logger
from utils import load_checkpoint, load_pretrained, save_checkpoint, get_grad_norm, auto_resume_helper, reduce_tensor
def get_config(args):
def parse_option():
    """Build the CLI parser, parse known options, and derive the run config.

    Returns:
        tuple: (parsed argparse Namespace, frozen yacs config).
    """
    arg_parser = argparse.ArgumentParser('Swin Transformer training and evaluation script', add_help=False)
    arg_parser.add_argument('--cfg', type=str, required=True, metavar="FILE", help='path to config file', )
    arg_parser.add_argument(
        "--opts",
        help="Modify config options by adding 'KEY VALUE' pairs. ",
        default=None,
        nargs='+',
    )
    # easy config modification
    arg_parser.add_argument('--batch-size', type=int, help="batch size for single GPU")
    arg_parser.add_argument('--data-path', type=str, help='path to dataset')
    arg_parser.add_argument('--zip', action='store_true', help='use zipped dataset instead of folder dataset')
    arg_parser.add_argument('--cache-mode', type=str, default='part', choices=['no', 'full', 'part'],
                            help='no: no cache, '
                                 'full: cache all data, '
                                 'part: sharding the dataset into nonoverlapping pieces and only cache one piece')
    arg_parser.add_argument('--pretrained',
                            help='pretrained weight from checkpoint, could be imagenet22k pretrained weight')
    arg_parser.add_argument('--resume', help='resume from checkpoint')
    arg_parser.add_argument('--accumulation-steps', type=int, help="gradient accumulation steps")
    arg_parser.add_argument('--use-checkpoint', action='store_true',
                            help="whether to use gradient checkpointing to save memory")
    arg_parser.add_argument('--amp-opt-level', type=str, default='O1', choices=['O0', 'O1', 'O2'],
                            help='mixed precision opt level, if O0, no amp is used')
    arg_parser.add_argument('--output', default='output', type=str, metavar='PATH',
                            help='root of output folder, the full path is <output>/<model_name>/<tag> (default: output)')
    arg_parser.add_argument('--tag', help='tag of experiment')
    arg_parser.add_argument('--eval', action='store_true', help='Perform evaluation only')
    arg_parser.add_argument('--throughput', action='store_true', help='Test throughput only')
    # distributed training
    arg_parser.add_argument("--local_rank", type=int, required=True, help='local rank for DistributedDataParallel')
    # parse_known_args: tolerate extra launcher-injected arguments.
    args, unparsed = arg_parser.parse_known_args()
    return args, get_config(args)
14,978 | import glob
import os
import shutil
import time
import random
import argparse
import datetime
import PIL.Image
import cv2
import matplotlib.pyplot as plt
import matplotlib.cm as CM
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import tqdm
from timm.loss import LabelSmoothingCrossEntropy, SoftTargetCrossEntropy
from timm.utils import accuracy, AverageMeter
from config import get_config
from models import build_model
from data import build_loader, build_transform
from lr_scheduler import build_scheduler
from optimizer import build_optimizer
from logger import create_logger
from utils import load_checkpoint, load_pretrained, save_checkpoint, get_grad_norm, auto_resume_helper, reduce_tensor
def get_grad_norm(parameters, norm_type=2):
    """Return the total ``norm_type``-norm over all gradients in ``parameters``.

    Parameters without a gradient are skipped; a lone tensor is accepted in
    place of an iterable. Returns 0.0 when no gradients are present.
    """
    if isinstance(parameters, torch.Tensor):
        parameters = [parameters]
    norm_type = float(norm_type)
    grads = [p.grad.data for p in parameters if p.grad is not None]
    # Combine per-tensor norms as (sum_i ||g_i||_p^p) ** (1/p).
    accumulated = sum(g.norm(norm_type).item() ** norm_type for g in grads)
    return accumulated ** (1. / norm_type)
def train_one_epoch(config, model, criterion, data_loader, optimizer, epoch, mixup_fn, lr_scheduler):
    """Train ``model`` for one epoch.

    Handles Mixup/CutMix, optional Apex AMP loss scaling, gradient clipping
    and gradient accumulation; logs progress every ``config.PRINT_FREQ``
    iterations. Relies on the module-level ``logger`` (and ``amp`` when
    ``config.AMP_OPT_LEVEL`` != "O0").
    """
    model.train()
    optimizer.zero_grad()
    num_steps = len(data_loader)
    batch_time = AverageMeter()
    loss_meter = AverageMeter()
    norm_meter = AverageMeter()
    start = time.time()
    end = time.time()
    for idx, (samples, targets) in enumerate(data_loader):
        samples = samples.cuda(non_blocking=True)
        targets = targets.cuda(non_blocking=True)
        if mixup_fn is not None:
            samples, targets = mixup_fn(samples, targets)
        outputs = model(samples)
        if config.TRAIN.ACCUMULATION_STEPS > 1:
            # Gradient accumulation: scale the loss so summed gradients match
            # one large-batch step, and only step the optimizer every
            # ACCUMULATION_STEPS iterations.
            loss = criterion(outputs, targets)
            loss = loss / config.TRAIN.ACCUMULATION_STEPS
            if config.AMP_OPT_LEVEL != "O0":
                # Apex AMP: backprop through the scaled loss, then clip or
                # measure the norm on the fp32 master parameters.
                with amp.scale_loss(loss, optimizer) as scaled_loss:
                    scaled_loss.backward()
                if config.TRAIN.CLIP_GRAD:
                    grad_norm = torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), config.TRAIN.CLIP_GRAD)
                else:
                    grad_norm = get_grad_norm(amp.master_params(optimizer))
            else:
                loss.backward()
                if config.TRAIN.CLIP_GRAD:
                    grad_norm = torch.nn.utils.clip_grad_norm_(model.parameters(), config.TRAIN.CLIP_GRAD)
                else:
                    grad_norm = get_grad_norm(model.parameters())
            if (idx + 1) % config.TRAIN.ACCUMULATION_STEPS == 0:
                optimizer.step()
                optimizer.zero_grad()
                # The scheduler is driven by the global iteration counter.
                lr_scheduler.step_update(epoch * num_steps + idx)
        else:
            # No accumulation: one optimizer step per batch.
            loss = criterion(outputs, targets)
            optimizer.zero_grad()
            if config.AMP_OPT_LEVEL != "O0":
                with amp.scale_loss(loss, optimizer) as scaled_loss:
                    scaled_loss.backward()
                if config.TRAIN.CLIP_GRAD:
                    grad_norm = torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), config.TRAIN.CLIP_GRAD)
                else:
                    grad_norm = get_grad_norm(amp.master_params(optimizer))
            else:
                loss.backward()
                if config.TRAIN.CLIP_GRAD:
                    grad_norm = torch.nn.utils.clip_grad_norm_(model.parameters(), config.TRAIN.CLIP_GRAD)
                else:
                    grad_norm = get_grad_norm(model.parameters())
            optimizer.step()
            lr_scheduler.step_update(epoch * num_steps + idx)
        # Wait for the GPU so the timing below measures real work.
        torch.cuda.synchronize()
        loss_meter.update(loss.item(), targets.size(0))
        norm_meter.update(grad_norm)
        batch_time.update(time.time() - end)
        end = time.time()
        if idx % config.PRINT_FREQ == 0:
            lr = optimizer.param_groups[0]['lr']
            memory_used = torch.cuda.max_memory_allocated() / (1024.0 * 1024.0)
            etas = batch_time.avg * (num_steps - idx)
            logger.info(
                f'Train: [{epoch}/{config.TRAIN.EPOCHS}][{idx}/{num_steps}]\t'
                f'eta {datetime.timedelta(seconds=int(etas))} lr {lr:.6f}\t'
                f'time {batch_time.val:.4f} ({batch_time.avg:.4f})\t'
                f'loss {loss_meter.val:.4f} ({loss_meter.avg:.4f})\t'
                f'grad_norm {norm_meter.val:.4f} ({norm_meter.avg:.4f})\t'
                f'mem {memory_used:.0f}MB')
    epoch_time = time.time() - start
    logger.info(f"EPOCH {epoch} training takes {datetime.timedelta(seconds=int(epoch_time))}")
14,979 | import glob
import os
import shutil
import time
import random
import argparse
import datetime
import PIL.Image
import cv2
import matplotlib.pyplot as plt
import matplotlib.cm as CM
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import tqdm
from timm.loss import LabelSmoothingCrossEntropy, SoftTargetCrossEntropy
from timm.utils import accuracy, AverageMeter
from config import get_config
from models import build_model
from data import build_loader, build_transform
from lr_scheduler import build_scheduler
from optimizer import build_optimizer
from logger import create_logger
from utils import load_checkpoint, load_pretrained, save_checkpoint, get_grad_norm, auto_resume_helper, reduce_tensor
def reduce_tensor(tensor):
def validate(config, data_loader, model):
    """Evaluate ``model`` on ``data_loader``.

    Computes cross-entropy loss and top-1 accuracy per batch; metrics pass
    through ``reduce_tensor`` (presumably a cross-rank average — confirm in
    utils). Relies on the module-level ``logger``.

    Returns:
        tuple: (average top-1 accuracy, average loss).
    """
    criterion = torch.nn.CrossEntropyLoss()
    model.eval()
    batch_time = AverageMeter()
    loss_meter = AverageMeter()
    acc1_meter = AverageMeter()
    # acc5_meter = AverageMeter()
    end = time.time()
    for idx, (images, target) in enumerate(data_loader):
        images = images.cuda(non_blocking=True)
        target = target.cuda(non_blocking=True)
        # compute output
        output = model(images)
        # measure accuracy and record loss
        loss = criterion(output, target)
        # acc1, acc5 = accuracy(output, target, topk=(1, 5))
        acc1, = accuracy(output, target, topk=(1, ))
        acc1 = reduce_tensor(acc1)
        # acc5 = reduce_tensor(acc5)
        loss = reduce_tensor(loss)
        loss_meter.update(loss.item(), target.size(0))
        acc1_meter.update(acc1.item(), target.size(0))
        # acc5_meter.update(acc5.item(), target.size(0))
        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()
        if idx % config.PRINT_FREQ == 0:
            memory_used = torch.cuda.max_memory_allocated() / (1024.0 * 1024.0)
            logger.info(
                f'Test: [{idx}/{len(data_loader)}]\t'
                f'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                f'Loss {loss_meter.val:.4f} ({loss_meter.avg:.4f})\t'
                f'Acc@1 {acc1_meter.val:.3f} ({acc1_meter.avg:.3f})\t'
                # f'Acc@5 {acc5_meter.val:.3f} ({acc5_meter.avg:.3f})\t'
                f'Mem {memory_used:.0f}MB')
    # logger.info(f' * Acc@1 {acc1_meter.avg:.3f} Acc@5 {acc5_meter.avg:.3f}')
    # return acc1_meter.avg, acc5_meter.avg, loss_meter.avg
    logger.info(f' * Acc@1 {acc1_meter.avg:.3f}')
    return acc1_meter.avg, loss_meter.avg
14,980 | import io
import os
import time
import torch.distributed as dist
import torch.utils.data as data
from PIL import Image
from .zipreader import is_zip_path, ZipReader
def find_classes(dir):
    """Return (sorted class names, name->index map) from the subfolders of ``dir``."""
    classes = sorted(
        entry for entry in os.listdir(dir)
        if os.path.isdir(os.path.join(dir, entry))
    )
    # Indices follow alphabetical order so labels are stable across runs.
    class_to_idx = {name: index for index, name in enumerate(classes)}
    return classes, class_to_idx
14,981 | import io
import os
import time
import torch.distributed as dist
import torch.utils.data as data
from PIL import Image
from .zipreader import is_zip_path, ZipReader
def has_file_allowed_extension(filename, extensions):
    """Checks if a file is an allowed extension.

    Args:
        filename (string): path to a file
        extensions (iterable of string): allowed lowercase extensions

    Returns:
        bool: True if the filename ends with a known image extension
    """
    # Case-insensitive match: str.endswith accepts a tuple of suffixes.
    return filename.lower().endswith(tuple(extensions))
def make_dataset(dir, class_to_idx, extensions):
    """Collect (path, class_index) samples from a class-per-folder tree.

    Class folders, walked directories, and filenames are all visited in
    sorted order, so the resulting sample list is deterministic.
    """
    images = []
    root_dir = os.path.expanduser(dir)
    for target in sorted(os.listdir(root_dir)):
        class_dir = os.path.join(root_dir, target)
        if not os.path.isdir(class_dir):
            continue
        # os.walk recursion lets images live in nested subfolders of a class.
        for walk_root, _, fnames in sorted(os.walk(class_dir)):
            for fname in sorted(fnames):
                if not has_file_allowed_extension(fname, extensions):
                    continue
                images.append((os.path.join(walk_root, fname), class_to_idx[target]))
    return images
14,982 | import io
import os
import time
import torch.distributed as dist
import torch.utils.data as data
from PIL import Image
from .zipreader import is_zip_path, ZipReader
def make_dataset_with_ann(ann_file, img_prefix, extensions):
    """Build (path, label) samples from a tab-separated annotation file.

    Each line of ``ann_file`` is ``<image name>\\t<class index>``; image
    paths are prefixed with ``img_prefix``.
    """
    samples = []
    with open(ann_file, "r") as handle:
        for line in handle:
            fields = line.split('\t')
            name, label = fields[0], int(fields[1])
            # Guard against annotation entries pointing at unsupported formats.
            assert os.path.splitext(name)[-1].lower() in extensions
            samples.append((os.path.join(img_prefix, name), label))
    return samples
14,983 | import io
import os
import time
import torch.distributed as dist
import torch.utils.data as data
from PIL import Image
from .zipreader import is_zip_path, ZipReader
def pil_loader(path):
    """Decode an image as RGB from a filesystem path, raw bytes, or a zip entry."""
    # open path as file to avoid ResourceWarning (https://github.com/python-pillow/Pillow/issues/835)
    if isinstance(path, bytes):
        return Image.open(io.BytesIO(path)).convert('RGB')
    if is_zip_path(path):
        payload = ZipReader.read(path)
        return Image.open(io.BytesIO(payload)).convert('RGB')
    with open(path, 'rb') as f:
        # Convert while the file is still open: PIL loads pixel data lazily.
        return Image.open(f).convert('RGB')
def accimage_loader(path):
    """Load ``path`` with the accimage backend, falling back to PIL on failure."""
    import accimage
    try:
        return accimage.Image(path)
    except IOError:
        # Potentially a decoding problem, fall back to PIL.Image
        return pil_loader(path)
def default_img_loader(path):
    """Dispatch to the loader matching torchvision's configured image backend."""
    from torchvision import get_image_backend
    loader = accimage_loader if get_image_backend() == 'accimage' else pil_loader
    return loader(path)
14,984 | import os
import torch
import numpy as np
import torch.distributed as dist
from torchvision import datasets, transforms
from timm.data.constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.data import Mixup
from timm.data import create_transform
from .cached_image_folder import CachedImageFolder
from .samplers import SubsetRandomSampler
def build_dataset(is_train, config):
    """Create the dataset selected by ``config.DATA.DATASET``.

    Args:
        is_train (bool): build the training split if True, else validation.
        config: yacs config node whose ``DATA`` section selects the dataset,
            data path, zip mode and cache mode.

    Returns:
        tuple: (dataset, nb_classes).

    Raises:
        NotImplementedError: for 'imagenet22K' or any unknown dataset name.
    """
    transform = build_transform(is_train, config)
    # 'imagenet' and 'camelyon16' previously had two byte-identical branches
    # differing only in the class count; keep that in one table instead.
    num_classes_of = {'imagenet': 1000, 'camelyon16': 2}
    dataset_name = config.DATA.DATASET
    if dataset_name in num_classes_of:
        prefix = 'train' if is_train else 'val'
        if config.DATA.ZIP_MODE:
            ann_file = prefix + "_map.txt"
            prefix = prefix + ".zip@/"
            # Validation always uses 'part' caching to bound memory use.
            dataset = CachedImageFolder(config.DATA.DATA_PATH, ann_file, prefix, transform,
                                        cache_mode=config.DATA.CACHE_MODE if is_train else 'part')
        else:
            root = os.path.join(config.DATA.DATA_PATH, prefix)
            dataset = datasets.ImageFolder(root, transform=transform)
        nb_classes = num_classes_of[dataset_name]
    elif dataset_name == 'imagenet22K':
        raise NotImplementedError("Imagenet-22K will come soon.")
    else:
        raise NotImplementedError("We only support ImageNet Now.")
    return dataset, nb_classes
class SubsetRandomSampler(torch.utils.data.Sampler):
    r"""Samples elements randomly from a given list of indices, without replacement.

    Arguments:
        indices (sequence): a sequence of indices
    """

    def __init__(self, indices):
        self.epoch = 0
        self.indices = indices

    def __iter__(self):
        # Yield the held indices in a freshly drawn random order each time.
        permutation = torch.randperm(len(self.indices))
        return (self.indices[position] for position in permutation)

    def __len__(self):
        return len(self.indices)

    def set_epoch(self, epoch):
        # Kept for API parity with DistributedSampler; the permutation above
        # does not consume the epoch value.
        self.epoch = epoch
def build_loader(config):
    """Build train/val datasets, distributed-aware samplers, data loaders and
    the optional Mixup/CutMix function from ``config``.

    Returns:
        tuple: (dataset_train, dataset_val, data_loader_train,
        data_loader_val, mixup_fn) where ``mixup_fn`` is None when neither
        Mixup nor CutMix is enabled.
    """
    # NUM_CLASSES is derived from the dataset, so the (frozen) config must be
    # temporarily unfrozen to write it back.
    config.defrost()
    dataset_train, config.MODEL.NUM_CLASSES = build_dataset(is_train=True, config=config)
    config.freeze()
    print(f"local rank {config.LOCAL_RANK} / global rank {dist.get_rank()} successfully build train dataset")
    dataset_val, _ = build_dataset(is_train=False, config=config)
    print(f"local rank {config.LOCAL_RANK} / global rank {dist.get_rank()} successfully build val dataset")
    num_tasks = dist.get_world_size()
    global_rank = dist.get_rank()
    if config.DATA.ZIP_MODE and config.DATA.CACHE_MODE == 'part':
        # 'part' caching shards the zip across ranks, so each rank samples
        # only every world_size-th index starting at its own rank.
        indices = np.arange(dist.get_rank(), len(dataset_train), dist.get_world_size())
        sampler_train = SubsetRandomSampler(indices)
    else:
        sampler_train = torch.utils.data.DistributedSampler(
            dataset_train, num_replicas=num_tasks, rank=global_rank, shuffle=True
        )
    if config.TEST.SEQUENTIAL:
        sampler_val = torch.utils.data.SequentialSampler(dataset_val)
    else:
        sampler_val = torch.utils.data.distributed.DistributedSampler(
            dataset_val, shuffle=False
        )
    data_loader_train = torch.utils.data.DataLoader(
        dataset_train, sampler=sampler_train,
        batch_size=config.DATA.BATCH_SIZE,
        num_workers=config.DATA.NUM_WORKERS,
        pin_memory=config.DATA.PIN_MEMORY,
        drop_last=True,  # keep the per-step batch size constant for training
    )
    data_loader_val = torch.utils.data.DataLoader(
        dataset_val, sampler=sampler_val,
        batch_size=config.DATA.BATCH_SIZE,
        shuffle=False,
        num_workers=config.DATA.NUM_WORKERS,
        pin_memory=config.DATA.PIN_MEMORY,
        drop_last=False
    )
    # setup mixup / cutmix
    mixup_fn = None
    mixup_active = config.AUG.MIXUP > 0 or config.AUG.CUTMIX > 0. or config.AUG.CUTMIX_MINMAX is not None
    if mixup_active:
        mixup_fn = Mixup(
            mixup_alpha=config.AUG.MIXUP, cutmix_alpha=config.AUG.CUTMIX, cutmix_minmax=config.AUG.CUTMIX_MINMAX,
            prob=config.AUG.MIXUP_PROB, switch_prob=config.AUG.MIXUP_SWITCH_PROB, mode=config.AUG.MIXUP_MODE,
            label_smoothing=config.MODEL.LABEL_SMOOTHING, num_classes=config.MODEL.NUM_CLASSES)
    return dataset_train, dataset_val, data_loader_train, data_loader_val, mixup_fn
14,985 | import torch
from timm.scheduler.cosine_lr import CosineLRScheduler
from timm.scheduler.step_lr import StepLRScheduler
from timm.scheduler.scheduler import Scheduler
class LinearLRScheduler(Scheduler):
def __init__(self,
optimizer: torch.optim.Optimizer,
t_initial: int,
lr_min_rate: float,
warmup_t=0,
warmup_lr_init=0.,
t_in_epochs=True,
noise_range_t=None,
noise_pct=0.67,
noise_std=1.0,
noise_seed=42,
initialize=True,
) -> None:
def _get_lr(self, t):
def get_epoch_values(self, epoch: int):
def get_update_values(self, num_updates: int):
def build_scheduler(config, optimizer, n_iter_per_epoch):
    """Build the LR scheduler named in the config.

    All schedules are driven in iteration units (``t_in_epochs=False``), so
    epoch counts from the config are converted via ``n_iter_per_epoch``.
    Returns None for an unrecognized scheduler name.
    """
    total_steps = int(config.TRAIN.EPOCHS * n_iter_per_epoch)
    warmup_steps = int(config.TRAIN.WARMUP_EPOCHS * n_iter_per_epoch)
    decay_steps = int(config.TRAIN.LR_SCHEDULER.DECAY_EPOCHS * n_iter_per_epoch)

    scheduler_name = config.TRAIN.LR_SCHEDULER.NAME
    lr_scheduler = None
    if scheduler_name == 'cosine':
        lr_scheduler = CosineLRScheduler(
            optimizer,
            t_initial=total_steps,
            t_mul=1.,
            lr_min=config.TRAIN.MIN_LR,
            warmup_lr_init=config.TRAIN.WARMUP_LR,
            warmup_t=warmup_steps,
            cycle_limit=1,
            t_in_epochs=False,
        )
    elif scheduler_name == 'linear':
        lr_scheduler = LinearLRScheduler(
            optimizer,
            t_initial=total_steps,
            lr_min_rate=0.01,
            warmup_lr_init=config.TRAIN.WARMUP_LR,
            warmup_t=warmup_steps,
            t_in_epochs=False,
        )
    elif scheduler_name == 'step':
        lr_scheduler = StepLRScheduler(
            optimizer,
            decay_t=decay_steps,
            decay_rate=config.TRAIN.LR_SCHEDULER.DECAY_RATE,
            warmup_lr_init=config.TRAIN.WARMUP_LR,
            warmup_t=warmup_steps,
            t_in_epochs=False,
        )
    return lr_scheduler
14,986 | import glob
import os.path
import PIL.Image as Image
import tqdm
import multiprocessing
import time
def run_mp(img_path, dst_path):
    """Crop the fixed (48, 48)-(720, 720) region from ``img_path`` and save it to ``dst_path``."""
    cropped = Image.open(img_path).crop((48, 48, 720, 720))
    cropped.save(dst_path)
def deamon_thread(q):
    """Drain queue ``q`` of (source, destination) crop jobs until it is empty."""
    print("I love work!")
    while not q.empty():
        source, destination = q.get()
        run_mp(source, destination)
        # Report remaining work after each job.
        print(q.qsize())
    print("Bye~")
14,987 | import glob
import os
import shutil
import time
import random
import argparse
import datetime
import PIL.Image
import cv2
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import tqdm
from timm.loss import LabelSmoothingCrossEntropy, SoftTargetCrossEntropy
from timm.utils import accuracy, AverageMeter
from config import get_config
from models import build_model
from data import build_loader, build_transform
from lr_scheduler import build_scheduler
from optimizer import build_optimizer
from logger import create_logger
from utils import load_checkpoint, load_pretrained, save_checkpoint, get_grad_norm, auto_resume_helper, reduce_tensor
def get_config(args):
def parse_option():
    """Build the CLI parser, parse known options, and derive the run config.

    Returns:
        tuple: (parsed argparse Namespace, frozen yacs config).
    """
    arg_parser = argparse.ArgumentParser('Swin Transformer training and evaluation script', add_help=False)
    arg_parser.add_argument('--cfg', type=str, required=True, metavar="FILE", help='path to config file', )
    arg_parser.add_argument(
        "--opts",
        help="Modify config options by adding 'KEY VALUE' pairs. ",
        default=None,
        nargs='+',
    )
    # easy config modification
    arg_parser.add_argument('--batch-size', type=int, help="batch size for single GPU")
    arg_parser.add_argument('--data-path', type=str, help='path to dataset')
    arg_parser.add_argument('--zip', action='store_true', help='use zipped dataset instead of folder dataset')
    arg_parser.add_argument('--cache-mode', type=str, default='part', choices=['no', 'full', 'part'],
                            help='no: no cache, '
                                 'full: cache all data, '
                                 'part: sharding the dataset into nonoverlapping pieces and only cache one piece')
    arg_parser.add_argument('--pretrained',
                            help='pretrained weight from checkpoint, could be imagenet22k pretrained weight')
    arg_parser.add_argument('--resume', help='resume from checkpoint')
    arg_parser.add_argument('--accumulation-steps', type=int, help="gradient accumulation steps")
    arg_parser.add_argument('--use-checkpoint', action='store_true',
                            help="whether to use gradient checkpointing to save memory")
    arg_parser.add_argument('--amp-opt-level', type=str, default='O1', choices=['O0', 'O1', 'O2'],
                            help='mixed precision opt level, if O0, no amp is used')
    arg_parser.add_argument('--output', default='output', type=str, metavar='PATH',
                            help='root of output folder, the full path is <output>/<model_name>/<tag> (default: output)')
    arg_parser.add_argument('--tag', help='tag of experiment')
    arg_parser.add_argument('--eval', action='store_true', help='Perform evaluation only')
    arg_parser.add_argument('--throughput', action='store_true', help='Test throughput only')
    # distributed training
    arg_parser.add_argument("--local_rank", type=int, required=True, help='local rank for DistributedDataParallel')
    # parse_known_args: tolerate extra launcher-injected arguments.
    args, unparsed = arg_parser.parse_known_args()
    return args, get_config(args)
14,988 | from torch import optim as optim
def set_weight_decay(model, skip_list=(), skip_keywords=()):
    """Split trainable parameters into with/without weight-decay groups.

    1-D tensors (e.g. norm scales), ``.bias`` parameters, names listed in
    ``skip_list``, and names matching any ``skip_keywords`` entry are placed
    in the zero-weight-decay group.
    """
    has_decay = []
    no_decay = []
    for name, param in model.named_parameters():
        if not param.requires_grad:
            continue  # frozen weights
        exempt = (
            len(param.shape) == 1
            or name.endswith(".bias")
            or (name in skip_list)
            or check_keywords_in_name(name, skip_keywords)
        )
        if exempt:
            no_decay.append(param)
            # print(f"{name} has no weight decay")
        else:
            has_decay.append(param)
    return [{'params': has_decay},
            {'params': no_decay, 'weight_decay': 0.}]
The provided code snippet includes necessary dependencies for implementing the `build_optimizer` function. Write a Python function `def build_optimizer(config, model)` to solve the following problem:
Build optimizer, set weight decay of normalization to 0 by default.
Here is the function:
def build_optimizer(config, model):
    """
    Build optimizer, set weight decay of normalization to 0 by default.

    Parameters the model explicitly exempts (via ``no_weight_decay`` /
    ``no_weight_decay_keywords`` hooks) are also placed in the zero-decay group.

    Raises:
        ValueError: if ``config.TRAIN.OPTIMIZER.NAME`` is not a supported
            optimizer — previously an unknown name silently returned ``None``.
    """
    skip = {}
    skip_keywords = {}
    if hasattr(model, 'no_weight_decay'):
        skip = model.no_weight_decay()
    if hasattr(model, 'no_weight_decay_keywords'):
        skip_keywords = model.no_weight_decay_keywords()
    parameters = set_weight_decay(model, skip, skip_keywords)

    opt_lower = config.TRAIN.OPTIMIZER.NAME.lower()
    if opt_lower == 'sgd':
        optimizer = optim.SGD(parameters, momentum=config.TRAIN.OPTIMIZER.MOMENTUM, nesterov=True,
                              lr=config.TRAIN.BASE_LR, weight_decay=config.TRAIN.WEIGHT_DECAY)
    elif opt_lower == 'adamw':
        optimizer = optim.AdamW(parameters, eps=config.TRAIN.OPTIMIZER.EPS, betas=config.TRAIN.OPTIMIZER.BETAS,
                                lr=config.TRAIN.BASE_LR, weight_decay=config.TRAIN.WEIGHT_DECAY)
    else:
        # Fail loudly instead of returning None for a typo'd optimizer name.
        raise ValueError(f'Unsupported optimizer: {config.TRAIN.OPTIMIZER.NAME}')
    return optimizer
14,989 | import glob
import os
import shutil
import time
import random
import argparse
import datetime
import PIL.Image
import cv2
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import tqdm
from timm.loss import LabelSmoothingCrossEntropy, SoftTargetCrossEntropy
from timm.utils import accuracy, AverageMeter
from config import get_config
from models import build_model
from data import build_loader, build_transform
from lr_scheduler import build_scheduler
from optimizer import build_optimizer
from logger import create_logger
from utils import load_checkpoint, load_pretrained, save_checkpoint, get_grad_norm, auto_resume_helper, reduce_tensor
def get_config(args):
    """Get a yacs CfgNode object with default values."""
    # Clone the module-level defaults so command-line overrides never
    # mutate the shared template (_C) seen by other callers.
    cfg = _C.clone()
    update_config(cfg, args)
    return cfg
def parse_option():
    """Parse known command-line options and build the run config.

    Returns:
        tuple: (parsed args namespace, yacs config built from them).
    """
    parser = argparse.ArgumentParser('Swin Transformer training and evaluation script', add_help=False)
    add = parser.add_argument  # local alias; argument order defines --help order
    add('--cfg', type=str, required=True, metavar="FILE", help='path to config file', )
    add(
        "--opts",
        help="Modify config options by adding 'KEY VALUE' pairs. ",
        default=None,
        nargs='+',
    )

    # easy config modification
    add('--batch-size', type=int, help="batch size for single GPU")
    add('--data-path', type=str, help='path to dataset')
    add('--zip', action='store_true', help='use zipped dataset instead of folder dataset')
    add('--cache-mode', type=str, default='part', choices=['no', 'full', 'part'],
        help='no: no cache, '
             'full: cache all data, '
             'part: sharding the dataset into nonoverlapping pieces and only cache one piece')
    add('--pretrained',
        help='pretrained weight from checkpoint, could be imagenet22k pretrained weight')
    add('--resume', help='resume from checkpoint')
    add('--accumulation-steps', type=int, help="gradient accumulation steps")
    add('--use-checkpoint', action='store_true',
        help="whether to use gradient checkpointing to save memory")
    add('--amp-opt-level', type=str, default='O1', choices=['O0', 'O1', 'O2'],
        help='mixed precision opt level, if O0, no amp is used')
    add('--output', default='output', type=str, metavar='PATH',
        help='root of output folder, the full path is <output>/<model_name>/<tag> (default: output)')
    add('--tag', help='tag of experiment')
    add('--eval', action='store_true', help='Perform evaluation only')
    add('--throughput', action='store_true', help='Test throughput only')

    # distributed training
    add("--local_rank", type=int, required=True, help='local rank for DistributedDataParallel')

    # parse_known_args tolerates launcher-injected extras (e.g. torch.distributed)
    args, unparsed = parser.parse_known_args()
    config = get_config(args)
    return args, config
14,990 | import glob
import os
import tqdm
import PIL.ImageFile
import PIL.Image
import multiprocessing
import time
def deamon_thread(q):
    """Worker: drain *q* of image paths, shrink each 'test' image by 10 %
    and save it under ``visualization_root``.

    NOTE(review): relies on a module-global ``visualization_root`` — confirm
    it is defined before workers start. Also note q.empty()/q.get() is not an
    atomic pair; acceptable here only because producers finish before workers run.
    """
    print("I love work!")
    while not q.empty():
        img_path = q.get()
        name = os.path.basename(img_path)
        if 'test' not in img_path:
            continue
        # Check for an existing output *before* opening/resizing -- previously
        # the image was fully decoded and resized and only then discarded.
        if os.path.exists(os.path.join(visualization_root, name)):
            continue
        print(name)
        tag = name.split('.')[-2].split('_')[-1]
        # if tag not in ['01', '06', '11', '16', '21', '26']:
        #     continue
        # Allow truncated files and disable PIL's decompression-bomb limit
        # (these inputs are trusted, very large images).
        PIL.ImageFile.LOAD_TRUNCATED_IMAGES = True
        PIL.Image.MAX_IMAGE_PIXELS = None
        img = PIL.Image.open(img_path)
        size = img.size
        img = img.resize((int(size[0]*0.9), int(size[1]*0.9)))
        # img = img.resize((int(size[0]//2), int(size[1]//2)))
        img.save(os.path.join(visualization_root, name))
    print("Bye~")
14,991 | import os
import sys
import logging
import functools
from termcolor import colored
def create_logger(output_dir, dist_rank=0, name=''):
    """Build a DEBUG-level logger: every rank writes its own log file,
    and rank 0 additionally logs (colorized) to stdout."""
    logger = logging.getLogger(name)
    logger.setLevel(logging.DEBUG)
    logger.propagate = False

    # one plain format for files, one colorized for the console
    plain_fmt = '[%(asctime)s %(name)s] (%(filename)s %(lineno)d): %(levelname)s %(message)s'
    console_fmt = colored('[%(asctime)s %(name)s]', 'green') + \
        colored('(%(filename)s %(lineno)d)', 'yellow') + ': %(levelname)s %(message)s'

    # only the master process writes to the console
    if dist_rank == 0:
        to_console = logging.StreamHandler(sys.stdout)
        to_console.setLevel(logging.DEBUG)
        to_console.setFormatter(
            logging.Formatter(fmt=console_fmt, datefmt='%Y-%m-%d %H:%M:%S'))
        logger.addHandler(to_console)

    # every rank appends to its own per-rank file
    to_file = logging.FileHandler(os.path.join(output_dir, f'log_rank{dist_rank}.txt'), mode='a')
    to_file.setLevel(logging.DEBUG)
    to_file.setFormatter(logging.Formatter(fmt=plain_fmt, datefmt='%Y-%m-%d %H:%M:%S'))
    logger.addHandler(to_file)

    return logger
14,994 | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint as checkpoint
import numpy as np
from timm.models.layers import DropPath, to_2tuple, trunc_normal_
import torchvision.models as models
The provided code snippet includes necessary dependencies for implementing the `window_partition` function. Write a Python function `def window_partition(x, window_size)` to solve the following problem:
Args: x: (B, H, W, C) window_size (int): window size Returns: windows: (num_windows*B, window_size, window_size, C)
Here is the function:
def window_partition(x, window_size):
    """
    Args:
        x: (B, H, W, C)
        window_size (int): window size
    Returns:
        windows: (num_windows*B, window_size, window_size, C)
    """
    B, H, W, C = x.shape
    n_h, n_w = H // window_size, W // window_size
    # (B, nH, ws, nW, ws, C) -> (B, nH, nW, ws, ws, C) -> (B*nH*nW, ws, ws, C)
    tiled = x.view(B, n_h, window_size, n_w, window_size, C)
    return tiled.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)
14,995 | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint as checkpoint
import numpy as np
from timm.models.layers import DropPath, to_2tuple, trunc_normal_
import torchvision.models as models
The provided code snippet includes necessary dependencies for implementing the `window_reverse` function. Write a Python function `def window_reverse(windows, window_size, H, W)` to solve the following problem:
Args: windows: (num_windows*B, window_size, window_size, C) window_size (int): Window size H (int): Height of image W (int): Width of image Returns: x: (B, H, W, C)
Here is the function:
def window_reverse(windows, window_size, H, W):
    """
    Args:
        windows: (num_windows*B, window_size, window_size, C)
        window_size (int): Window size
        H (int): Height of image
        W (int): Width of image
    Returns:
        x: (B, H, W, C)
    """
    # recover batch size from the total window count
    B = int(windows.shape[0] / (H * W / window_size / window_size))
    grid = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1)
    # (B, nH, nW, ws, ws, C) -> (B, nH, ws, nW, ws, C) -> (B, H, W, C)
    return grid.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1)
14,996 | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint as checkpoint
from timm.models.layers import DropPath, to_2tuple, trunc_normal_
The provided code snippet includes necessary dependencies for implementing the `window_partition` function. Write a Python function `def window_partition(x, window_size)` to solve the following problem:
Args: x: (B, H, W, C) window_size (int): window size Returns: windows: (num_windows*B, window_size, window_size, C)
Here is the function:
def window_partition(x, window_size):
    """
    Args:
        x: (B, H, W, C)
        window_size (int): window size
    Returns:
        windows: (num_windows*B, window_size, window_size, C)
    """
    B, H, W, C = x.shape
    # tile the spatial grid, bring the two window axes together, then flatten
    # batch and tile indices into one leading dimension
    x = x.reshape(B, H // window_size, window_size, W // window_size, window_size, C)
    windows = x.permute(0, 1, 3, 2, 4, 5).reshape(-1, window_size, window_size, C)
    return windows
14,997 | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint as checkpoint
from timm.models.layers import DropPath, to_2tuple, trunc_normal_
The provided code snippet includes necessary dependencies for implementing the `window_reverse` function. Write a Python function `def window_reverse(windows, window_size, H, W)` to solve the following problem:
Args: windows: (num_windows*B, window_size, window_size, C) window_size (int): Window size H (int): Height of image W (int): Width of image Returns: x: (B, H, W, C)
Here is the function:
def window_reverse(windows, window_size, H, W):
    """
    Args:
        windows: (num_windows*B, window_size, window_size, C)
        window_size (int): Window size
        H (int): Height of image
        W (int): Width of image
    Returns:
        x: (B, H, W, C)
    """
    # number of windows per image is (H/ws) * (W/ws); divide it out to get B
    B = int(windows.shape[0] / (H * W / window_size / window_size))
    x = windows.reshape(B, H // window_size, W // window_size, window_size, window_size, -1)
    x = x.permute(0, 1, 3, 2, 4, 5).reshape(B, H, W, -1)
    return x
15,000 | from .swin_transformer import SwinTransformer
from .swin_mlp import SwinMLP
from .swin_transformer_v2 import SwinTransformerV2
from .swin_transformer_resnet import SwinTransformerRes
class SwinTransformer(nn.Module):
    """ Swin Transformer backbone adapted for per-window classification.

    A PyTorch impl of : `Swin Transformer: Hierarchical Vision Transformer using Shifted Windows` -
    https://arxiv.org/pdf/2103.14030

    The forward pass runs the Swin stages, then splits the final feature map
    into non-overlapping windows, average-pools each window and classifies it
    with a shared linear head (one prediction per window).

    Args:
        pretrain_img_size (int): Input image size for training the pretrained model,
            used in absolute postion embedding. Default 224.
        patch_size (int | tuple(int)): Patch size. Default: 4.
        in_chans (int): Number of input image channels. Default: 3.
        embed_dim (int): Number of linear projection output channels. Default: 96.
        depths (tuple[int]): Depths of each Swin Transformer stage.
        num_heads (tuple[int]): Number of attention head of each stage.
        window_size (int): Window size. Default: 7.
        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4.
        qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True
        qk_scale (float): Override default qk scale of head_dim ** -0.5 if set.
        drop_rate (float): Dropout rate.
        attn_drop_rate (float): Attention dropout rate. Default: 0.
        drop_path_rate (float): Stochastic depth rate. Default: 0.2.
        norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm.
        ape (bool): If True, add absolute position embedding to the patch embedding. Default: False.
        patch_norm (bool): If True, add normalization after patch embedding. Default: True.
        out_indices (Sequence[int]): Output from which stages.
        frozen_stages (int): Stages to be frozen (stop grad and set eval mode).
            -1 means not freezing any parameters.
        use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.
    """

    # NOTE(review): default out_indices=(3,) assumes 4 stages, but the default
    # depths=[9] builds a single stage -- configs presumably always override
    # these defaults together; confirm before relying on them.
    def __init__(self,
                 pretrain_img_size=224,
                 patch_size=16,
                 in_chans=3,
                 num_classes=2,
                 embed_dim=96,
                 # depths=[2, 2, 6, 2],
                 # num_heads=[3, 6, 12, 24],
                 depths=[9],
                 num_heads=[6],
                 window_size=7,
                 mlp_ratio=4.,
                 qkv_bias=True,
                 qk_scale=None,
                 drop_rate=0.,
                 attn_drop_rate=0.,
                 drop_path_rate=0.2,
                 norm_layer=nn.LayerNorm,
                 ape=False,
                 patch_norm=True,
                 # out_indices=(0, 1, 2, 3),
                 out_indices=(3,),
                 # out_indices=(0,),
                 frozen_stages=-1,
                 use_checkpoint=False):
        super().__init__()

        self.pretrain_img_size = pretrain_img_size
        self.num_layers = len(depths)
        self.embed_dim = embed_dim
        self.ape = ape
        self.patch_norm = patch_norm
        self.out_indices = out_indices
        self.frozen_stages = frozen_stages
        self.window_size = window_size
        self.num_classes = num_classes

        # split image into non-overlapping patches
        self.patch_embed = PatchEmbed(
            patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim,
            norm_layer=norm_layer if self.patch_norm else None)

        # absolute position embedding (learned, interpolated at runtime to the
        # actual feature-map size in forward())
        if self.ape:
            pretrain_img_size = to_2tuple(pretrain_img_size)
            patch_size = to_2tuple(patch_size)
            patches_resolution = [pretrain_img_size[0] // patch_size[0], pretrain_img_size[1] // patch_size[1]]
            self.absolute_pos_embed = nn.Parameter(torch.zeros(1, embed_dim, patches_resolution[0], patches_resolution[1]))
            trunc_normal_(self.absolute_pos_embed, std=.02)

        self.pos_drop = nn.Dropout(p=drop_rate)

        # stochastic depth: drop-path probability increases linearly over blocks
        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))]  # stochastic depth decay rule

        # build layers; channel count doubles per stage, PatchMerging downsamples
        # between stages (no downsample after the last one)
        self.layers = nn.ModuleList()
        for i_layer in range(self.num_layers):
            layer = BasicLayer(
                dim=int(embed_dim * 2 ** i_layer),
                depth=depths[i_layer],
                num_heads=num_heads[i_layer],
                window_size=window_size,
                mlp_ratio=mlp_ratio,
                qkv_bias=qkv_bias,
                qk_scale=qk_scale,
                drop=drop_rate,
                attn_drop=attn_drop_rate,
                drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])],
                norm_layer=norm_layer,
                downsample=PatchMerging if (i_layer < self.num_layers - 1) else None,
                # downsample=None,
                use_checkpoint=use_checkpoint)
            self.layers.append(layer)

        num_features = [int(embed_dim * 2 ** i) for i in range(self.num_layers)]
        # self.num_features = int(embed_dim * 2 ** (self.num_layers - 1))
        self.num_features = num_features

        # add a norm layer for each requested output stage (registered as
        # attributes norm{i} so checkpoints keep per-stage names)
        for i_layer in out_indices:
            layer = norm_layer(num_features[i_layer])
            layer_name = f'norm{i_layer}'
            self.add_module(layer_name, layer)

        # per-window classification head: mean-pool window tokens, then linear
        self.avgpool = nn.AdaptiveAvgPool1d(1)
        self.head = nn.Linear(self.num_features[-1], num_classes) if num_classes > 0 else nn.Identity()

        # self._freeze_stages()
        self.apply(self._init_weights)

    def _freeze_stages(self):
        # Freeze patch embedding, then (optionally) the APE, dropout and the
        # first (frozen_stages - 1) transformer stages.
        if self.frozen_stages >= 0:
            self.patch_embed.eval()
            for param in self.patch_embed.parameters():
                param.requires_grad = False

        if self.frozen_stages >= 1 and self.ape:
            self.absolute_pos_embed.requires_grad = False

        if self.frozen_stages >= 2:
            self.pos_drop.eval()
            for i in range(0, self.frozen_stages - 1):
                m = self.layers[i]
                m.eval()
                for param in m.parameters():
                    param.requires_grad = False

    def _init_weights(self, m):
        # Truncated-normal init for linear weights, zeros/ones for LayerNorm.
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)

    def forward(self, x):
        """Forward function.

        Returns a tuple ``(logits, None)``; logits are per-window when the
        padded feature map holds more than one window, otherwise squeezed to
        per-image shape (see the squeeze at the end).
        """
        x = self.patch_embed(x)

        Wh, Ww = x.size(2), x.size(3)
        if self.ape:
            # interpolate the position embedding to the corresponding size
            absolute_pos_embed = F.interpolate(self.absolute_pos_embed, size=(Wh, Ww), mode='bicubic')
            x = (x + absolute_pos_embed).flatten(2).transpose(1, 2)  # B Wh*Ww C
        else:
            x = x.flatten(2).transpose(1, 2)
        x = self.pos_drop(x)

        # run the stages, collecting normalized feature maps for out_indices
        outs = []
        for i in range(self.num_layers):
            layer = self.layers[i]
            x_out, H, W, x, Wh, Ww = layer(x, Wh, Ww)

            if i in self.out_indices:
                norm_layer = getattr(self, f'norm{i}')
                x_out = norm_layer(x_out)

                out = x_out.view(-1, H, W, self.num_features[i]).permute(0, 3, 1, 2).contiguous()
                outs.append(out)

        x = outs[-1]
        B, C, H, W = x.shape
        x = x.permute(0, 2, 3, 1)  # B, H, W, C
        # padding: right/bottom zero-pad so H and W are multiples of window_size
        pad_l = pad_t = 0
        pad_r = (self.window_size - W % self.window_size) % self.window_size
        pad_b = (self.window_size - H % self.window_size) % self.window_size
        x = F.pad(x, (0, 0, pad_l, pad_r, pad_t, pad_b))

        # partition into non-overlapping windows: (B*nH*nW, ws*ws, C)
        B, H, W, C = x.shape
        x = x.view(B, H // self.window_size, self.window_size, W // self.window_size, self.window_size, C)
        x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, self.window_size, self.window_size, C)
        x = x.view(-1, self.window_size*self.window_size, C)

        # mean-pool each window's tokens, classify each window independently
        x = self.avgpool(x.transpose(1, 2))  # B C 1
        x = torch.flatten(x, 1)
        x = self.head(x)
        # back to a (B, num_classes, nH, nW) map of per-window logits
        x = x.view(B, H // self.window_size, W // self.window_size, self.num_classes)
        x = x.permute(0, 3, 1, 2).contiguous()
        # # For training on single window
        # collapses the window grid when it is 1x1 (single-window input)
        x = x.squeeze(-1).squeeze(-1)
        return x, None

    def train(self, mode=True):
        """Convert the model into training mode while keep layers freezed."""
        super(SwinTransformer, self).train(mode)
        self._freeze_stages()
class SwinMLP(nn.Module):
    r""" Swin MLP: standard whole-image classifier (global pooled features).

    Args:
        img_size (int | tuple(int)): Input image size. Default 224
        patch_size (int | tuple(int)): Patch size. Default: 4
        in_chans (int): Number of input image channels. Default: 3
        num_classes (int): Number of classes for classification head. Default: 1000
        embed_dim (int): Patch embedding dimension. Default: 96
        depths (tuple(int)): Depth of each Swin MLP layer.
        num_heads (tuple(int)): Number of attention heads in different layers.
        window_size (int): Window size. Default: 7
        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4
        drop_rate (float): Dropout rate. Default: 0
        drop_path_rate (float): Stochastic depth rate. Default: 0.1
        norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm.
        ape (bool): If True, add absolute position embedding to the patch embedding. Default: False
        patch_norm (bool): If True, add normalization after patch embedding. Default: True
        use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False
    """

    def __init__(self, img_size=224, patch_size=4, in_chans=3, num_classes=1000,
                 embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24],
                 window_size=7, mlp_ratio=4., drop_rate=0., drop_path_rate=0.1,
                 norm_layer=nn.LayerNorm, ape=False, patch_norm=True,
                 use_checkpoint=False, **kwargs):
        super().__init__()

        self.num_classes = num_classes
        self.num_layers = len(depths)
        self.embed_dim = embed_dim
        self.ape = ape
        self.patch_norm = patch_norm
        # final stage channel count (channels double per stage)
        self.num_features = int(embed_dim * 2 ** (self.num_layers - 1))
        self.mlp_ratio = mlp_ratio

        # split image into non-overlapping patches
        self.patch_embed = PatchEmbed(
            img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim,
            norm_layer=norm_layer if self.patch_norm else None)
        num_patches = self.patch_embed.num_patches
        patches_resolution = self.patch_embed.patches_resolution
        self.patches_resolution = patches_resolution

        # absolute position embedding (fixed size; no runtime interpolation here)
        if self.ape:
            self.absolute_pos_embed = nn.Parameter(torch.zeros(1, num_patches, embed_dim))
            trunc_normal_(self.absolute_pos_embed, std=.02)

        self.pos_drop = nn.Dropout(p=drop_rate)

        # stochastic depth: linearly increasing drop-path rate over all blocks
        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))]  # stochastic depth decay rule

        # build layers; resolution halves and channels double between stages
        self.layers = nn.ModuleList()
        for i_layer in range(self.num_layers):
            layer = BasicLayer(dim=int(embed_dim * 2 ** i_layer),
                               input_resolution=(patches_resolution[0] // (2 ** i_layer),
                                                 patches_resolution[1] // (2 ** i_layer)),
                               depth=depths[i_layer],
                               num_heads=num_heads[i_layer],
                               window_size=window_size,
                               mlp_ratio=self.mlp_ratio,
                               drop=drop_rate,
                               drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])],
                               norm_layer=norm_layer,
                               downsample=PatchMerging if (i_layer < self.num_layers - 1) else None,
                               use_checkpoint=use_checkpoint)
            self.layers.append(layer)

        self.norm = norm_layer(self.num_features)
        self.avgpool = nn.AdaptiveAvgPool1d(1)
        self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()

        self.apply(self._init_weights)

    def _init_weights(self, m):
        # Truncated-normal init for linear/conv1d weights; zeros/ones for LayerNorm.
        if isinstance(m, (nn.Linear, nn.Conv1d)):
            trunc_normal_(m.weight, std=.02)
            if m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)

    def no_weight_decay(self):
        # parameters the optimizer builder should exempt from weight decay
        return {'absolute_pos_embed'}

    def no_weight_decay_keywords(self):
        # name keywords the optimizer builder should exempt from weight decay
        return {'relative_position_bias_table'}

    def forward_features(self, x):
        """Embed, run all stages, normalize, and global-average-pool to (B, C)."""
        x = self.patch_embed(x)
        if self.ape:
            x = x + self.absolute_pos_embed
        x = self.pos_drop(x)

        for layer in self.layers:
            x = layer(x)

        x = self.norm(x)  # B L C
        x = self.avgpool(x.transpose(1, 2))  # B C 1
        x = torch.flatten(x, 1)
        return x

    def forward(self, x):
        """Return class logits of shape (B, num_classes)."""
        x = self.forward_features(x)
        x = self.head(x)
        return x

    def flops(self):
        """Approximate FLOPs: patch embed + stages + final norm/pool + head."""
        flops = 0
        flops += self.patch_embed.flops()
        for i, layer in enumerate(self.layers):
            flops += layer.flops()
        flops += self.num_features * self.patches_resolution[0] * self.patches_resolution[1] // (2 ** self.num_layers)
        flops += self.num_features * self.num_classes
        return flops
class SwinTransformerV2(nn.Module):
    """ Swin Transformer backbone variant with per-window class tokens.

    A PyTorch impl of : `Swin Transformer: Hierarchical Vision Transformer using Shifted Windows` -
    https://arxiv.org/pdf/2103.14030

    Unlike :class:`SwinTransformer`, each window carries a class token
    (``cls_token=True`` on the last stage); the head classifies those tokens
    and the class-token attention map is returned for visualization.

    Args:
        pretrain_img_size (int): Input image size for training the pretrained model,
            used in absolute postion embedding. Default 224.
        patch_size (int | tuple(int)): Patch size. Default: 4.
        in_chans (int): Number of input image channels. Default: 3.
        embed_dim (int): Number of linear projection output channels. Default: 96.
        depths (tuple[int]): Depths of each Swin Transformer stage.
        num_heads (tuple[int]): Number of attention head of each stage.
        window_size (int): Window size. Default: 7.
        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4.
        qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True
        qk_scale (float): Override default qk scale of head_dim ** -0.5 if set.
        drop_rate (float): Dropout rate.
        attn_drop_rate (float): Attention dropout rate. Default: 0.
        drop_path_rate (float): Stochastic depth rate. Default: 0.2.
        norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm.
        ape (bool): If True, add absolute position embedding to the patch embedding. Default: False.
        patch_norm (bool): If True, add normalization after patch embedding. Default: True.
        out_indices (Sequence[int]): Output from which stages.
        frozen_stages (int): Stages to be frozen (stop grad and set eval mode).
            -1 means not freezing any parameters.
        use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.
    """

    def __init__(self,
                 pretrain_img_size=224,
                 patch_size=16,
                 in_chans=3,
                 num_classes=2,
                 embed_dim=96,
                 # depths=[2, 2, 6, 2],
                 # num_heads=[3, 6, 12, 24],
                 depths=[9],
                 num_heads=[6],
                 window_size=7,
                 mlp_ratio=4.,
                 qkv_bias=True,
                 qk_scale=None,
                 drop_rate=0.,
                 attn_drop_rate=0.,
                 drop_path_rate=0.2,
                 norm_layer=nn.LayerNorm,
                 ape=False,
                 patch_norm=True,
                 # out_indices=(0, 1, 2, 3),
                 out_indices=(0,),
                 frozen_stages=-1,
                 use_checkpoint=False):
        super().__init__()

        self.pretrain_img_size = pretrain_img_size
        self.num_layers = len(depths)
        self.embed_dim = embed_dim
        self.ape = ape
        self.patch_norm = patch_norm
        self.out_indices = out_indices
        self.frozen_stages = frozen_stages
        self.window_size = window_size
        self.num_classes = num_classes

        # split image into non-overlapping patches
        self.patch_embed = PatchEmbed(
            patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim,
            norm_layer=norm_layer if self.patch_norm else None)

        # absolute position embedding (interpolated to the runtime size in forward)
        if self.ape:
            pretrain_img_size = to_2tuple(pretrain_img_size)
            patch_size = to_2tuple(patch_size)
            patches_resolution = [pretrain_img_size[0] // patch_size[0], pretrain_img_size[1] // patch_size[1]]
            self.absolute_pos_embed = nn.Parameter(torch.zeros(1, embed_dim, patches_resolution[0], patches_resolution[1]))
            trunc_normal_(self.absolute_pos_embed, std=.02)

        self.pos_drop = nn.Dropout(p=drop_rate)

        # stochastic depth: linearly increasing drop-path rate over all blocks
        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))]  # stochastic depth decay rule

        # build layers; no spatial downsampling here, and only the last stage
        # carries per-window class tokens
        self.layers = nn.ModuleList()
        for i_layer in range(self.num_layers):
            layer = BasicLayer(
                dim=int(embed_dim * 2 ** i_layer),
                depth=depths[i_layer],
                num_heads=num_heads[i_layer],
                window_size=window_size,
                mlp_ratio=mlp_ratio,
                qkv_bias=qkv_bias,
                qk_scale=qk_scale,
                drop=drop_rate,
                attn_drop=attn_drop_rate,
                drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])],
                norm_layer=norm_layer,
                # downsample=PatchMerging if (i_layer < self.num_layers - 1) else None,
                downsample=None,
                cls_token=False if (i_layer < self.num_layers - 1) else True,
                use_checkpoint=use_checkpoint)
            self.layers.append(layer)

        num_features = [int(embed_dim * 2 ** i) for i in range(self.num_layers)]
        # self.num_features = int(embed_dim * 2 ** (self.num_layers - 1))
        self.num_features = num_features

        # add a norm layer for each output except the last
        # for i in range(len(out_indices)-1):
        #     i_layer = out_indices[i]
        #     layer = norm_layer(num_features[i_layer])
        #     layer_name = f'norm{i_layer}'
        #     self.add_module(layer_name, layer)
        for i_layer in out_indices:
            layer = norm_layer(num_features[i_layer])
            layer_name = f'norm{i_layer}'
            self.add_module(layer_name, layer)

        # linear head applied to each window's class token
        self.head = nn.Linear(self.num_features[-1], num_classes) if num_classes > 0 else nn.Identity()

        # self._freeze_stages()
        self.apply(self._init_weights)

    def _freeze_stages(self):
        # Freeze patch embedding, then (optionally) the APE, dropout and the
        # first (frozen_stages - 1) transformer stages.
        if self.frozen_stages >= 0:
            self.patch_embed.eval()
            for param in self.patch_embed.parameters():
                param.requires_grad = False

        if self.frozen_stages >= 1 and self.ape:
            self.absolute_pos_embed.requires_grad = False

        if self.frozen_stages >= 2:
            self.pos_drop.eval()
            for i in range(0, self.frozen_stages - 1):
                m = self.layers[i]
                m.eval()
                for param in m.parameters():
                    param.requires_grad = False

    def _init_weights(self, m):
        # Truncated-normal init for linear weights, zeros/ones for LayerNorm.
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)

    def forward(self, x):
        """Forward function.

        Returns:
            tuple: (per-window class-token logits -- squeezed to per-image
            shape when the grid is 1x1 -- and the class-token attention map
            reshaped to (num_windows*B, window_size, window_size)).
        """
        x = self.patch_embed(x)
        # fix: removed a leftover debug print(x.shape) that spammed stdout on
        # every forward pass
        B, C, H, W = x.shape
        nH = int(np.ceil(H / self.window_size))
        nW = int(np.ceil(W / self.window_size))

        Wh, Ww = x.size(2), x.size(3)
        if self.ape:
            # interpolate the position embedding to the corresponding size
            absolute_pos_embed = F.interpolate(self.absolute_pos_embed, size=(Wh, Ww), mode='bicubic')
            x = (x + absolute_pos_embed).flatten(2).transpose(1, 2)  # B Wh*Ww C
        else:
            x = x.flatten(2).transpose(1, 2)
        x = self.pos_drop(x)

        outs = []
        for i in range(self.num_layers):
            layer = self.layers[i]
            x_out, cls_tokens, attn, H, W, x, Wh, Ww = layer(x, Wh, Ww)

            if i in self.out_indices:
                cls_tokens = cls_tokens.contiguous().view(B, nH * nW, self.num_features[-1])
                norm_layer = getattr(self, f'norm{i}')
                # normalize class tokens and patch tokens together, then split
                out = norm_layer(torch.cat((cls_tokens, x_out), dim=1))
                cls_tokens = out[:, :nH * nW]
                x_out = out[:, nH * nW:]

                out = x_out.view(-1, H, W, self.num_features[i]).permute(0, 3, 1, 2).contiguous()
                outs.append(out)
                #
                # attention paid by each window's class token to its patch tokens,
                # averaged over heads
                cls_attn = attn[:, :, 0, 1:]  # [B, H, N-1]
                cls_attn = cls_attn.mean(dim=1)  # [B, N-1]
                # print()
        # _, idx = torch.topk(cls_attn, left_tokens, dim=1, largest=True, sorted=True)  # [B, left_tokens]
        # # cls_idx = torch.zeros(B, 1, dtype=idx.dtype, device=idx.device)
        # # index = torch.cat([cls_idx, idx + 1], dim=1)
        # index = idx.unsqueeze(-1).expand(-1, -1, C)  # [B, left_tokens, C]

        # classify every window's class token with the shared head
        cls_tokens = cls_tokens.contiguous().view(-1, self.num_features[-1])
        cls_tokens = self.head(cls_tokens)
        cls_tokens = cls_tokens.view(B, nH, nW, self.num_classes)
        cls_tokens = cls_tokens.permute(0, 3, 1, 2).contiguous()
        # # For training on single window
        cls_tokens = cls_tokens.squeeze(-1).squeeze(-1)
        # fix: the attention map side length was hard-coded to 7; use the
        # configured window size (identical result for the default 7). Assumes
        # each window holds window_size**2 patch tokens -- true for square
        # Swin windows.
        return cls_tokens, cls_attn.reshape(-1, self.window_size, self.window_size)

    def train(self, mode=True):
        """Convert the model into training mode while keep layers freezed."""
        super(SwinTransformerV2, self).train(mode)
        self._freeze_stages()
class SwinTransformerRes(nn.Module):
""" Swin Transformer backbone.
A PyTorch impl of : `Swin Transformer: Hierarchical Vision Transformer using Shifted Windows` -
https://arxiv.org/pdf/2103.14030
Args:
pretrain_img_size (int): Input image size for training the pretrained model,
used in absolute postion embedding. Default 224.
patch_size (int | tuple(int)): Patch size. Default: 4.
in_chans (int): Number of input image channels. Default: 3.
embed_dim (int): Number of linear projection output channels. Default: 96.
depths (tuple[int]): Depths of each Swin Transformer stage.
num_heads (tuple[int]): Number of attention head of each stage.
window_size (int): Window size. Default: 7.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4.
qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True
qk_scale (float): Override default qk scale of head_dim ** -0.5 if set.
drop_rate (float): Dropout rate.
attn_drop_rate (float): Attention dropout rate. Default: 0.
drop_path_rate (float): Stochastic depth rate. Default: 0.2.
norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm.
ape (bool): If True, add absolute position embedding to the patch embedding. Default: False.
patch_norm (bool): If True, add normalization after patch embedding. Default: True.
out_indices (Sequence[int]): Output from which stages.
frozen_stages (int): Stages to be frozen (stop grad and set eval mode).
-1 means not freezing any parameters.
use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.
"""
def __init__(self,
pretrain_img_size=224,
patch_size=16,
in_chans=3,
num_classes=2,
embed_dim=96,
# depths=[2, 2, 6, 2],
# num_heads=[3, 6, 12, 24],
depths=[9],
num_heads=[6],
window_size=7,
mlp_ratio=4.,
qkv_bias=True,
qk_scale=None,
drop_rate=0.,
attn_drop_rate=0.,
drop_path_rate=0.2,
norm_layer=nn.LayerNorm,
ape=False,
patch_norm=True,
# out_indices=(0, 1, 2, 3),
out_indices=(0,),
frozen_stages=-1,
use_checkpoint=False):
super().__init__()
self.pretrain_img_size = pretrain_img_size
self.num_layers = len(depths)
self.embed_dim = embed_dim
self.ape = ape
self.patch_norm = patch_norm
self.out_indices = out_indices
self.frozen_stages = frozen_stages
self.window_size = window_size
self.num_classes = num_classes
# split image into non-overlapping patches
# self.patch_embed = PatchEmbed(
# patch_size=16, in_chans=3, embed_dim=3*16*16, norm_layer=None)
self.backbone = models.resnet50(pretrained=True)
self.backbone.fc = nn.Linear(2048, num_classes)
# self._freeze_stages()
self.apply(self._init_weights)
def _freeze_stages(self):
if self.frozen_stages >= 0:
self.patch_embed.eval()
for param in self.patch_embed.parameters():
param.requires_grad = False
if self.frozen_stages >= 1 and self.ape:
self.absolute_pos_embed.requires_grad = False
if self.frozen_stages >= 2:
self.pos_drop.eval()
for i in range(0, self.frozen_stages - 1):
m = self.layers[i]
m.eval()
for param in m.parameters():
param.requires_grad = False
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
def forward(self, x):
    """Forward function.

    Pads the input to a multiple of 16, tiles it into 16x16 patches, keeps
    every other tile in both directions, re-flattens, pads again to a
    multiple of 112 (16*7), and feeds the result through the ResNet-50
    backbone. Returns ``(logits, None)``.
    """
    # x = self.patch_embed(x)
    # print(x.shape)
    B, C, H, W = x.shape
    window_size = 16
    pad_l = pad_t = 0
    # Pad right/bottom so both spatial dims are multiples of 16.
    pad_r = (window_size - W % window_size) % window_size
    pad_b = (window_size - H % window_size) % window_size
    x = F.pad(x, (pad_l, pad_r, pad_t, pad_b))
    _, _, Hp, Wp = x.shape
    # Split into a tile grid: (B, C, Hp//16, 16, Wp//16, 16).
    x = x.view(B, C, Hp // window_size, window_size, Wp // window_size, window_size)
    # Keep every other tile along both grid axes (drops 3/4 of the tiles).
    x = x[:, :, ::2, :, ::2, ]
    # NOTE(review): these flattens merge the surviving tiles back into a 4-D
    # tensor, but not in the original spatial arrangement — confirm intended.
    x = x.flatten(-2, -1).flatten(-3, -2).contiguous()
    # print(x.shape)
    B, C, H, W = x.shape
    window_size = 16*7
    pad_l = pad_t = 0
    # Second padding pass: make dims multiples of 112 before the backbone.
    pad_r = (window_size - W % window_size) % window_size
    pad_b = (window_size - H % window_size) % window_size
    x = F.pad(x, (pad_l, pad_r, pad_t, pad_b))
    _, _, Hp, Wp = x.shape
    #multi-patch
    # x = x.view(B, C, Hp // window_size, window_size, Wp // window_size, window_size)
    # x = x.permute(0, 2, 4, 3, 5, 1).contiguous().view(-1, window_size, window_size, C).permute(0, 3, 1, 2)
    x = self.backbone(x)
    # x = x.view(B, Hp // window_size, Wp // window_size, -1)
    # x = x.permute(0, 3, 1, 2).contiguous()
    return x, None
def train(self, mode=True):
    """Convert the model into training mode while keep layers freezed."""
    super(SwinTransformerRes, self).train(mode)
    # nn.Module.train() flips every submodule to `mode`, which would undo the
    # eval() state of the frozen stages — re-apply freezing afterwards.
    self._freeze_stages()
def build_model(config):
    """Build the classification model named by ``config.MODEL.TYPE``.

    Supported types: ``'swin'``, ``'swin_v2'``, ``'swin_r'`` (Swin variants
    that accept identical keyword arguments) and ``'swin_mlp'``.

    Args:
        config: parsed experiment config (yacs-style attribute access).

    Returns:
        The constructed model instance.

    Raises:
        NotImplementedError: for any other model type.
    """
    model_type = config.MODEL.TYPE
    # The three Swin variants take exactly the same keyword arguments, so
    # dispatch through a table instead of duplicating the call three times.
    swin_variants = {
        'swin': SwinTransformer,
        'swin_v2': SwinTransformerV2,
        'swin_r': SwinTransformerRes,
    }
    if model_type in swin_variants:
        model = swin_variants[model_type](
            pretrain_img_size=config.DATA.IMG_SIZE,
            patch_size=config.MODEL.SWIN.PATCH_SIZE,
            in_chans=config.MODEL.SWIN.IN_CHANS,
            num_classes=config.MODEL.NUM_CLASSES,
            embed_dim=config.MODEL.SWIN.EMBED_DIM,
            depths=config.MODEL.SWIN.DEPTHS,
            num_heads=config.MODEL.SWIN.NUM_HEADS,
            window_size=config.MODEL.SWIN.WINDOW_SIZE,
            mlp_ratio=config.MODEL.SWIN.MLP_RATIO,
            qkv_bias=config.MODEL.SWIN.QKV_BIAS,
            qk_scale=config.MODEL.SWIN.QK_SCALE,
            drop_rate=config.MODEL.DROP_RATE,
            drop_path_rate=config.MODEL.DROP_PATH_RATE,
            ape=config.MODEL.SWIN.APE,
            patch_norm=config.MODEL.SWIN.PATCH_NORM,
            use_checkpoint=config.TRAIN.USE_CHECKPOINT,
        )
    elif model_type == 'swin_mlp':
        model = SwinMLP(
            pretrain_img_size=config.DATA.IMG_SIZE,
            patch_size=config.MODEL.SWIN_MLP.PATCH_SIZE,
            in_chans=config.MODEL.SWIN_MLP.IN_CHANS,
            num_classes=config.MODEL.NUM_CLASSES,
            embed_dim=config.MODEL.SWIN_MLP.EMBED_DIM,
            depths=config.MODEL.SWIN_MLP.DEPTHS,
            num_heads=config.MODEL.SWIN_MLP.NUM_HEADS,
            window_size=config.MODEL.SWIN_MLP.WINDOW_SIZE,
            mlp_ratio=config.MODEL.SWIN_MLP.MLP_RATIO,
            drop_rate=config.MODEL.DROP_RATE,
            drop_path_rate=config.MODEL.DROP_PATH_RATE,
            ape=config.MODEL.SWIN_MLP.APE,
            patch_norm=config.MODEL.SWIN_MLP.PATCH_NORM,
            use_checkpoint=config.TRAIN.USE_CHECKPOINT,
        )
    else:
        # Fixed typo in the original message ("Unkown" -> "Unknown").
        raise NotImplementedError(f"Unknown model: {model_type}")
    return model
15,002 | from __future__ import annotations
import asyncio
import msgspec
from typing import Any
The provided code snippet includes necessary dependencies for implementing the `prefixed_send` function. Write a Python function `async def prefixed_send(stream: asyncio.StreamWriter, buffer: bytes) -> None` to solve the following problem:
Write a length-prefixed buffer to the stream
Here is the function:
async def prefixed_send(stream: asyncio.StreamWriter, buffer: bytes) -> None:
    """Send *buffer* over *stream*, preceded by its 4-byte big-endian length."""
    header = len(buffer).to_bytes(4, "big")
    # Queue the length prefix, then the payload, then wait for the transport
    # buffer to flush.
    stream.write(header)
    stream.write(buffer)
    await stream.drain()
15,003 | from __future__ import annotations
import asyncio
import msgspec
from typing import Any
The provided code snippet includes necessary dependencies for implementing the `prefixed_recv` function. Write a Python function `async def prefixed_recv(stream: asyncio.StreamReader) -> bytes` to solve the following problem:
Read a length-prefixed buffer from the stream
Here is the function:
async def prefixed_recv(stream: asyncio.StreamReader) -> bytes:
    """Receive one length-prefixed message from *stream*, returning the payload."""
    # The peer sends a 4-byte big-endian length followed by that many bytes.
    header = await stream.readexactly(4)
    length = int.from_bytes(header, "big")
    return await stream.readexactly(length)
15,004 | import json
import time
import orjson
import requests
import simdjson
import ujson
import msgspec
def query_msgspec(data: bytes) -> list[tuple[int, str]]:
    """Return the 10 largest packages in the repodata as ``(size, name)``
    pairs, largest first, using typed msgspec decoding."""
    # Use Struct types to define the JSON schema. For efficiency we only define
    # the fields we actually need.
    class Package(msgspec.Struct):
        name: str
        size: int

    class RepoData(msgspec.Struct):
        packages: dict[str, Package]

    # Decode the data as a `RepoData` type
    repo_data = msgspec.json.decode(data, type=RepoData)
    # Sort packages by `size`, and return the top 10
    return sorted(
        ((p.size, p.name) for p in repo_data.packages.values()), reverse=True
    )[:10]
15,005 | import json
import time
import orjson
import requests
import simdjson
import ujson
import msgspec
def query_orjson(data: bytes) -> list[tuple[int, str]]:
    """Return the 10 largest packages as ``(size, name)`` pairs using orjson."""
    packages = orjson.loads(data)["packages"].values()
    ranked = sorted(((pkg["size"], pkg["name"]) for pkg in packages), reverse=True)
    return ranked[:10]
15,006 | import json
import time
import orjson
import requests
import simdjson
import ujson
import msgspec
def query_json(data: bytes) -> list[tuple[int, str]]:
    """Return the 10 largest packages as ``(size, name)`` pairs using stdlib json."""
    packages = json.loads(data)["packages"]
    pairs = [(info["size"], info["name"]) for info in packages.values()]
    # Descending by size (ties broken by name, matching tuple ordering).
    pairs.sort(reverse=True)
    return pairs[:10]
15,007 | import json
import time
import orjson
import requests
import simdjson
import ujson
import msgspec
def query_ujson(data: bytes) -> list[tuple[int, str]]:
    """Return the 10 largest packages as ``(size, name)`` pairs using ujson."""
    repo_data = ujson.loads(data)
    return sorted(
        ((p["size"], p["name"]) for p in repo_data["packages"].values()), reverse=True
    )[:10]
15,008 | import json
import time
import orjson
import requests
import simdjson
import ujson
import msgspec
def query_simdjson(data: bytes) -> list[tuple[int, str]]:
    """Return the 10 largest packages as ``(size, name)`` pairs using pysimdjson."""
    # simdjson parses lazily; fields are materialized on access below.
    repo_data = simdjson.Parser().parse(data)
    return sorted(
        ((p["size"], p["name"]) for p in repo_data["packages"].values()), reverse=True
    )[:10]
15,009 | from typing import Any
import msgspec
class PyProject(Base):
build_system: BuildSystem | None = None
project: Project | None = None
tool: dict[str, dict[str, Any]] = {}
The provided code snippet includes necessary dependencies for implementing the `decode` function. Write a Python function `def decode(data: bytes | str) -> PyProject` to solve the following problem:
Decode a ``pyproject.toml`` file from TOML
Here is the function:
def decode(data: bytes | str) -> PyProject:
    """Decode a ``pyproject.toml`` file from TOML into a ``PyProject`` struct."""
    return msgspec.toml.decode(data, type=PyProject)
15,010 | from typing import Any
import msgspec
class PyProject(Base):
build_system: BuildSystem | None = None
project: Project | None = None
tool: dict[str, dict[str, Any]] = {}
The provided code snippet includes necessary dependencies for implementing the `encode` function. Write a Python function `def encode(msg: PyProject) -> bytes` to solve the following problem:
Encode a ``PyProject`` object to TOML
Here is the function:
def encode(msg: PyProject) -> bytes:
    """Encode a ``PyProject`` object to TOML bytes."""
    return msgspec.toml.encode(msg)
15,011 | from time import perf_counter
def bench(name, template):
    """Benchmark class definition, instantiation, equality and ordering.

    *template* is a source template with an ``{n}`` placeholder; it is
    expanded into 100 class definitions and exec'd to measure definition
    cost, then the first generated class (``C0``) is exercised for the
    remaining measurements. Returns
    ``(name, define_us, init_us, equality_us, order_us)``; the last element
    is ``None`` when instances are unorderable.
    """
    N_classes = 100
    source = "\n".join(template.format(n=i) for i in range(N_classes))
    code_obj = compile(source, "__main__", "exec")
    # Benchmark defining new types
    N = 200
    start = perf_counter()
    for _ in range(N):
        ns = {}
        exec(code_obj, ns)
    end = perf_counter()
    # Per-class definition time in microseconds.
    define_time = ((end - start) / (N * N_classes)) * 1e6
    C = ns["C0"]
    # Benchmark creating new instances
    N = 1000
    M = 1000
    start = perf_counter()
    for _ in range(N):
        [C(a=i, b=i, c=i, d=i, e=i) for i in range(M)]
    end = perf_counter()
    init_time = ((end - start) / (N * M)) * 1e6
    # Benchmark equality
    N = 1000
    M = 1000
    val = M - 1
    # Worst case: the needle equals the *last* element, so index() compares
    # against every instance in the haystack.
    needle = C(a=val, b=val, c=val, d=val, e=val)
    haystack = [C(a=i, b=i, c=i, d=i, e=i) for i in range(M)]
    start = perf_counter()
    for _ in range(N):
        haystack.index(needle)
    end = perf_counter()
    equality_time = ((end - start) / (N * M)) * 1e6
    # Benchmark order
    try:
        needle < needle  # probe: unorderable classes raise TypeError
    except TypeError:
        order_time = None
    else:
        start = perf_counter()
        for _ in range(N):
            for obj in haystack:
                if obj >= needle:
                    break
        end = perf_counter()
        order_time = ((end - start) / (N * M)) * 1e6
    return (name, define_time, init_time, equality_time, order_time)
15,012 | from time import perf_counter
def format_table(results):
    """Render ``(name, define, init, equality, order)`` benchmark rows as a
    reStructuredText grid table string. ``None`` timings render as ``N/A``."""
    headers = (
        "",
        "import (μs)",
        "create (μs)",
        "equality (μs)",
        "order (μs)",
    )

    def fmt(value):
        return "N/A" if value is None else f"{value:.2f}"

    body = [
        (f"**{name}**",) + tuple(fmt(t) for t in times)
        for name, *times in results
    ]
    # Column widths: widest of header and any cell in that column.
    widths = tuple(
        max(max(map(len, col)), len(head))
        for col, head in zip(zip(*body), headers)
    )
    template = ("|" + (" %%-%ds |" * len(headers))) % widths
    heavy_rule = "+%s+" % "+".join("=" * (w + 2) for w in widths)
    light_rule = "+%s+" % "+".join("-" * (w + 2) for w in widths)
    lines = [light_rule, template % headers, heavy_rule]
    for row in body:
        lines.append(template % row)
        lines.append(light_rule)
    return "\n".join(lines)
15,013 | import gc
import sys
import time
import msgspec
def sizeof(x, _seen=None):
    """Recursive size in bytes of *x*, counting each object at most once.

    Not generic: only follows dicts, ``__dict__`` and ``__slots__``, which is
    all this benchmark needs. ``_seen`` tracks visited object ids so shared
    objects and cycles are counted a single time.
    """
    if _seen is None:
        _seen = set()
    obj_id = id(x)
    if obj_id in _seen:
        return 0
    _seen.add(obj_id)
    total = sys.getsizeof(x)
    if isinstance(x, dict):
        for key, value in x.items():
            total += sizeof(key, _seen)
            total += sizeof(value, _seen)
    if hasattr(x, "__dict__"):
        total += sizeof(x.__dict__, _seen)
    if hasattr(x, "__slots__"):
        for slot in x.__slots__:
            total += sizeof(slot, _seen)
            total += sizeof(getattr(x, slot), _seen)
    return total
def bench_gc(cls):
    """Measure a full GC pass over 1M ``cls(i, i, i)`` instances.

    Returns ``(gc_time_ms, memory_mib)`` where memory is estimated with the
    recursive ``sizeof`` helper above.
    """
    # Allocate a dict of structs
    data = {i: cls(i, i, i) for i in range(1_000_000)}
    # Run a full collection
    start = time.perf_counter()
    gc.collect()
    stop = time.perf_counter()
    gc_time = (stop - start) * 1e3
    mibytes = sizeof(data) / (2**20)
    return gc_time, mibytes
15,014 | import gc
import sys
import time
import msgspec
def format_table(results):
    """Render ``(name, gc_time_ms, memory_mib)`` rows as a reST grid table."""
    headers = ("", "GC time (ms)", "Memory Used (MiB)")
    body = [
        (f"**{name}**", f"{gc_ms:.2f}", f"{mem:.2f}")
        for name, gc_ms, mem in results
    ]
    # Column widths: widest of header and any cell in that column.
    widths = tuple(
        max(max(map(len, col)), len(head))
        for col, head in zip(zip(*body), headers)
    )
    template = ("|" + (" %%-%ds |" * len(headers))) % widths
    heavy_rule = "+%s+" % "+".join("=" * (w + 2) for w in widths)
    light_rule = "+%s+" % "+".join("-" * (w + 2) for w in widths)
    lines = [light_rule, template % headers, heavy_rule]
    for row in body:
        lines.append(template % row)
        lines.append(light_rule)
    return "\n".join(lines)
15,015 | from __future__ import annotations
import enum
import dataclasses
import datetime
from typing import Literal
from mashumaro.mixins.orjson import DataClassORJSONMixin
def encode(x):
    """Serialize *x* to JSON via its mashumaro ``DataClassORJSONMixin``."""
    return x.to_json()
15,016 | from __future__ import annotations
import enum
import dataclasses
import datetime
from typing import Literal
from mashumaro.mixins.orjson import DataClassORJSONMixin
class Directory(DataClassORJSONMixin):
    # NOTE(review): this class appears truncated in this extract — its fields
    # are not visible, and `decode` presumably lives at module level in the
    # original source; confirm against the full file.
    def decode(msg):
        """Deserialize a Directory tree from a JSON string/bytes."""
        return Directory.from_json(msg)
15,017 | import argparse
import json
import tempfile
from ..generate_data import make_filesystem_data
import sys
import subprocess
# Libraries this benchmark knows how to exercise.
LIBRARIES = ["msgspec", "mashumaro", "cattrs", "pydantic"]

def parse_list(value):
    """Parse a comma-separated library list, exiting with status 1 if any
    name is not a supported library."""
    selected = [item.strip() for item in value.split(",")]
    for name in selected:
        if name not in LIBRARIES:
            print(f"{name!r} is not a supported library, choose from {LIBRARIES}")
            sys.exit(1)
    return selected
15,018 | from __future__ import annotations
import enum
import datetime
from typing import Literal
import attrs
import cattrs.preconf.orjson
# Shared cattrs converter preconfigured for orjson; fields equal to their
# defaults are omitted when dumping.
converter = cattrs.preconf.orjson.make_converter(omit_if_default=True)

def encode(obj):
    """Serialize an attrs object to JSON bytes with the shared converter."""
    return converter.dumps(obj)
15,019 | from __future__ import annotations
import enum
import datetime
from typing import Literal
import attrs
import cattrs.preconf.orjson
class Directory:
    # NOTE(review): the @attrs.define decorator is not visible in this
    # extract — confirm against the full file. Fields mirror the benchmark's
    # filesystem schema; `type` doubles as the tagged-union discriminator.
    name: str
    created_by: str
    created_at: datetime.datetime
    updated_by: str | None = None
    updated_at: datetime.datetime | None = None
    contents: list[File | Directory]
    type: Literal["directory"] = "directory"

# Shared cattrs converter preconfigured for orjson, omitting defaults.
converter = cattrs.preconf.orjson.make_converter(omit_if_default=True)

def decode(msg):
    """Deserialize a Directory tree from JSON with the shared converter."""
    return converter.loads(msg, Directory)
15,020 | from __future__ import annotations
import enum
import datetime
from typing import Literal, Annotated
import pydantic
def encode(obj):
    """Serialize a pydantic (v2 API) model to a JSON string, omitting defaults."""
    return obj.model_dump_json(exclude_defaults=True)
15,021 | from __future__ import annotations
import enum
import datetime
from typing import Literal, Annotated
import pydantic
class Directory(pydantic.BaseModel):
    """Pydantic v2 schema for a directory node in the benchmark tree."""
    type: Literal["directory"] = "directory"  # discriminator value
    name: str
    created_by: str
    created_at: datetime.datetime
    updated_by: str | None = None
    updated_at: datetime.datetime | None = None
    # Children form a tagged union discriminated on the `type` field.
    contents: list[Annotated[File | Directory, pydantic.Field(discriminator="type")]]

def decode(msg):
    """Parse JSON string/bytes into a Directory tree (pydantic v2 API)."""
    return Directory.model_validate_json(msg)
15,022 | from __future__ import annotations
import enum
import datetime
from typing import Literal, Annotated
import pydantic
def encode(obj):
    """Serialize a pydantic (v1 API) model to a JSON string, omitting defaults."""
    return obj.json(exclude_defaults=True)
15,023 | from __future__ import annotations
import enum
import datetime
from typing import Literal, Annotated
import pydantic
class Directory(pydantic.BaseModel):
    """Pydantic v1-API schema for a directory node in the benchmark tree."""
    type: Literal["directory"] = "directory"  # discriminator value
    name: str
    created_by: str
    created_at: datetime.datetime
    updated_by: str | None = None
    updated_at: datetime.datetime | None = None
    # Children form a tagged union discriminated on the `type` field.
    contents: list[Annotated[File | Directory, pydantic.Field(discriminator="type")]]

def decode(msg):
    """Parse JSON string/bytes into a Directory tree (pydantic v1 API)."""
    return Directory.parse_raw(msg)
15,024 | import io
import zipfile
import requests
The provided code snippet includes necessary dependencies for implementing the `get_latest_noarch_wheel_size` function. Write a Python function `def get_latest_noarch_wheel_size(library)` to solve the following problem:
Get the total uncompressed size of the latest noarch wheel
Here is the function:
def get_latest_noarch_wheel_size(library):
    """Get the total uncompressed size of the latest noarch wheel.

    Queries PyPI's JSON API for *library*, downloads its single ``.whl``
    release artifact, and returns ``(version, total_uncompressed_bytes)``.
    Raises ``ValueError`` unless exactly one wheel matches.
    """
    pypi_info = requests.get(f"https://pypi.org/pypi/{library}/json").json()
    version = pypi_info["info"]["version"]
    candidates = {
        release["filename"]: release["url"]
        for release in pypi_info["releases"][version]
        if release["filename"].endswith(".whl")
    }
    if len(candidates) != 1:
        raise ValueError(
            f"Expected to find only 1 matching file for {library}, got {list(candidates)}"
        )
    (wheel_url,) = candidates.values()
    # Wheels are zip archives; sum the stored members' uncompressed sizes.
    archive = zipfile.ZipFile(io.BytesIO(requests.get(wheel_url).content))
    return version, sum(entry.file_size for entry in archive.filelist)
15,025 | import io
import zipfile
import requests
The provided code snippet includes necessary dependencies for implementing the `get_latest_manylinux_wheel_size` function. Write a Python function `def get_latest_manylinux_wheel_size(library)` to solve the following problem:
Get the total uncompressed size of the latest Python 3.10 manylinux x86_64 wheel for the library
Here is the function:
def get_latest_manylinux_wheel_size(library):
    """Get the total uncompressed size of the latest Python 3.10 manylinux
    x86_64 wheel for the library"""
    resp = requests.get(f"https://pypi.org/pypi/{library}/json").json()
    version = resp["info"]["version"]
    files = {}
    for file_info in resp["releases"][version]:
        name = file_info["filename"]
        url = file_info["url"]
        # CPython 3.10, manylinux2014 x86_64, excluding PyPy ("pp73") builds.
        if "310" in name and "manylinux_2_17_x86_64" in name and "pp73" not in name:
            files[name] = url
    if len(files) != 1:
        raise ValueError(
            f"Expected to find only 1 matching file for {library}, got {list(files)}"
        )
    url = list(files.values())[0]
    resp = requests.get(url)
    # Wheels are zip archives; sum the members' uncompressed sizes.
    fil = io.BytesIO(resp.content)
    zfil = zipfile.ZipFile(fil)
    size = sum(f.file_size for f in zfil.filelist)
    return version, size
15,026 | from __future__ import annotations
import sys
import dataclasses
import json
import timeit
import importlib.metadata
from typing import Any, Literal, Callable
from .generate_data import make_filesystem_data
import msgspec
class Directory(msgspec.Struct, kw_only=True, omit_defaults=True, tag="directory"):
    """msgspec schema for a directory node (tagged-union member "directory")."""
    name: str
    created_by: str
    created_at: str
    updated_by: str | None = None
    updated_at: str | None = None
    contents: list[File | Directory]  # child nodes; File is defined elsewhere
class Benchmark:
    # NOTE(review): a @dataclass-style decorator is not visible in this
    # extract, but instances are constructed positionally below — confirm.
    label: str        # display name for the result row
    version: str
    encode: Callable
    decode: Callable
    schema: Any = None  # optional msgspec schema to convert the input into

    def run(self, data: bytes) -> dict:
        """Time one encode and one decode; returns per-call times in seconds."""
        if self.schema is not None:
            # Typed benchmarks operate on struct instances, not raw dicts.
            data = msgspec.convert(data, self.schema)
        timer = timeit.Timer("func(data)", globals={"func": self.encode, "data": data})
        n, t = timer.autorange()
        encode_time = t / n
        # Decode is timed on this benchmark's own encoded output.
        data = self.encode(data)
        timer = timeit.Timer("func(data)", globals={"func": self.decode, "data": data})
        n, t = timer.autorange()
        decode_time = t / n
        return {
            "label": self.label,
            "encode": encode_time,
            "decode": decode_time,
        }
def json_benchmarks():
    """Build the list of JSON ``Benchmark`` cases (msgspec vs. other libraries)."""
    import orjson
    import ujson
    import rapidjson
    import simdjson

    # pysimdjson's import name ("simdjson") differs from its distribution name.
    simdjson_ver = importlib.metadata.version("pysimdjson")

    rj_dumps = rapidjson.Encoder()
    rj_loads = rapidjson.Decoder()

    def uj_dumps(obj):
        return ujson.dumps(obj)

    enc = msgspec.json.Encoder()
    dec = msgspec.json.Decoder(Directory)  # typed (struct) decoding
    dec2 = msgspec.json.Decoder()          # untyped decoding
    return [
        Benchmark("msgspec structs", None, enc.encode, dec.decode, Directory),
        Benchmark("msgspec", msgspec.__version__, enc.encode, dec2.decode),
        Benchmark("json", None, json.dumps, json.loads),
        Benchmark("orjson", orjson.__version__, orjson.dumps, orjson.loads),
        Benchmark("ujson", ujson.__version__, uj_dumps, ujson.loads),
        Benchmark("rapidjson", rapidjson.__version__, rj_dumps, rj_loads),
        Benchmark("simdjson", simdjson_ver, simdjson.dumps, simdjson.loads),
    ]
15,027 | from __future__ import annotations
import sys
import dataclasses
import json
import timeit
import importlib.metadata
from typing import Any, Literal, Callable
from .generate_data import make_filesystem_data
import msgspec
class Directory(msgspec.Struct, kw_only=True, omit_defaults=True, tag="directory"):
class Benchmark:
def run(self, data: bytes) -> dict:
def msgpack_benchmarks():
    """Build the list of MessagePack ``Benchmark`` cases."""
    import msgpack
    import ormsgpack

    enc = msgspec.msgpack.Encoder()
    dec = msgspec.msgpack.Decoder(Directory)  # typed (struct) decoding
    dec2 = msgspec.msgpack.Decoder()          # untyped decoding
    return [
        Benchmark("msgspec structs", None, enc.encode, dec.decode, Directory),
        Benchmark("msgspec", msgspec.__version__, enc.encode, dec2.decode),
        Benchmark("msgpack", msgpack.__version__, msgpack.dumps, msgpack.loads),
        Benchmark(
            "ormsgpack", ormsgpack.__version__, ormsgpack.packb, ormsgpack.unpackb
        ),
    ]
15,028 | import datetime
import random
import string
class Generator:
    """Deterministic generator of a fake filesystem tree of nested dicts.

    ``capacity`` is the exact number of nodes (files + directories) the
    generated tree contains; ``seed`` fixes the RNG so output is reproducible.
    """

    UTC = datetime.timezone.utc
    DATE_2018 = datetime.datetime(2018, 1, 1, tzinfo=UTC)
    DATE_2023 = datetime.datetime(2023, 1, 1, tzinfo=UTC)
    PERMISSIONS = ["READ", "WRITE", "READ_WRITE"]
    NAMES = [
        "alice",
        "ben",
        "carol",
        "daniel",
        "esther",
        "franklin",
        "genevieve",
        "harold",
        "ilana",
        "jerome",
        "katelyn",
        "leonard",
        "monique",
        "nathan",
        "ora",
        "patrick",
        "quinn",
        "ronald",
        "stephanie",
        "thomas",
        "uma",
        "vince",
        "wendy",
        "xavier",
        "yitzchak",
        "zahra",
    ]

    def __init__(self, capacity, seed=42):
        self.capacity = capacity
        self.random = random.Random(seed)

    def randdt(self, min, max):
        """Return a random timezone-aware datetime between *min* and *max*."""
        # BUG FIX: random.randint() requires integer bounds; passing the
        # float results of .timestamp() raises TypeError on Python >= 3.12
        # (and was deprecated earlier). The timestamps here are integral,
        # so truncating to int preserves the sampled sequence.
        ts = self.random.randint(int(min.timestamp()), int(max.timestamp()))
        return datetime.datetime.fromtimestamp(ts).replace(tzinfo=self.UTC)

    def randstr(self, min=None, max=None):
        """Return a random ASCII-letter string; length is *min*, or uniform
        in [min, max] when *max* is given."""
        if max is not None:
            min = self.random.randint(min, max)
        return "".join(self.random.choices(string.ascii_letters, k=min))

    def make(self, is_dir):
        """Build one node dict; directories recursively consume capacity."""
        name = self.randstr(4, 30)
        created_by = self.random.choice(self.NAMES)
        created_at = self.randdt(self.DATE_2018, self.DATE_2023)
        data = {
            "type": "directory" if is_dir else "file",
            "name": name,
            "created_by": created_by,
            "created_at": created_at.isoformat(),
        }
        # ~25% of nodes also carry update metadata, dated after creation.
        if self.random.random() > 0.75:
            updated_by = self.random.choice(self.NAMES)
            updated_at = self.randdt(created_at, self.DATE_2023)
            data.update(
                updated_by=updated_by,
                updated_at=updated_at.isoformat(),
            )
        if is_dir:
            # Reserve capacity for this directory's direct children (each
            # child deducts exactly one unit, keeping the global node count).
            n = min(self.random.randint(0, 30), self.capacity)
            self.capacity -= n
            data["contents"] = [self.make_node() for _ in range(n)]
        else:
            data["nbytes"] = self.random.randint(0, 1000000)
            data["permissions"] = self.random.choice(self.PERMISSIONS)
        return data

    def make_node(self):
        """Build a random node: ~20% directories, ~80% files."""
        return self.make(self.random.random() > 0.8)

    def generate(self):
        """Build and return the full tree containing exactly ``capacity`` nodes."""
        self.capacity -= 1
        if self.capacity == 0:
            # capacity == 1: a single file is the whole tree.
            out = self.make(False)
        else:
            out = self.make(True)
        # Spend any remaining capacity on additional top-level children.
        while self.capacity:
            self.capacity -= 1
            out["contents"].append(self.make_node())
        return out
def make_filesystem_data(n):
    """Generate a deterministic fake filesystem tree containing *n* nodes."""
    return Generator(n).generate()
15,029 | import collections
import sys
import typing
def get_type_hints(obj):
    """Resolve type hints for *obj*, keeping ``Annotated`` metadata.

    Thin wrapper over the ``get_type_hints`` implementation bound to
    ``_get_type_hints`` at import time, with ``include_extras=True``.
    """
    return _get_type_hints(obj, include_extras=True)
15,030 | import collections
import sys
import typing
if Required is None and _AnnotatedAlias is None:
# No extras available, so no `include_extras`
get_type_hints = _get_type_hints
else:
def get_class_annotations(obj):
def get_typeddict_info(obj):
    """Return ``(hints, required_keys)`` for a TypedDict class or alias.

    *obj* may be the TypedDict class itself or a parametrized generic alias
    (in which case the class is ``obj.__origin__``).
    """
    if isinstance(obj, type):
        cls = obj
    else:
        # Parametrized generic alias, e.g. MyDict[int].
        cls = obj.__origin__
    raw_hints = get_class_annotations(obj)
    if hasattr(cls, "__required_keys__"):
        required = set(cls.__required_keys__)
    elif cls.__total__:
        # Older Pythons without __required_keys__: total=True => all required.
        required = set(raw_hints)
    else:
        required = set()
    # Both `typing.TypedDict` and `typing_extensions.TypedDict` have a bug
    # where `Required`/`NotRequired` aren't properly detected at runtime when
    # `__future__.annotations` is enabled, meaning the `__required_keys__`
    # isn't correct. This code block works around this issue by amending the
    # set of required keys as needed, while also stripping off any
    # `Required`/`NotRequired` wrappers.
    hints = {}
    for k, v in raw_hints.items():
        origin = getattr(v, "__origin__", False)
        if origin is Required:
            required.add(k)
            hints[k] = v.__args__[0]
        elif origin is NotRequired:
            required.discard(k)
            hints[k] = v.__args__[0]
        else:
            hints[k] = v
    return hints, required
15,031 | import collections
import sys
import typing
def get_class_annotations(obj):
    """Get the annotations for a class.

    This is similar to ``typing.get_type_hints``, except:

    - We maintain it
    - It leaves extras like ``Annotated``/``ClassVar`` alone
    - It resolves any parametrized generics in the class mro. The returned
      mapping may still include ``TypeVar`` values, but those should be treated
      as their unparametrized variants (i.e. equal to ``Any`` for the common case).

    Note that this function doesn't check that Generic types are being used
    properly - invalid uses of `Generic` may slip through without complaint.
    The assumption here is that the user is making use of a static analysis
    tool like ``mypy``/``pyright`` already, which would catch misuse of these
    APIs.
    """
    hints = {}
    mro, typevar_mappings = _get_class_mro_and_typevar_mappings(obj)
    for cls in mro:
        if cls in (typing.Generic, object):
            continue
        # TypeVar -> concrete-type substitutions for this parametrized base.
        mapping = typevar_mappings.get(cls)
        cls_locals = dict(vars(cls))
        cls_globals = getattr(sys.modules.get(cls.__module__, None), "__dict__", {})
        ann = cls.__dict__.get("__annotations__", {})
        for name, value in ann.items():
            if name in hints:
                # MRO is walked front-to-back: the first definition wins.
                continue
            if value is None:
                value = type(None)
            elif isinstance(value, str):
                # Stringized annotation (e.g. `from __future__ import annotations`).
                value = _forward_ref(value)
            # NOTE: typing._eval_type is a private API; this mirrors what
            # typing.get_type_hints does internally.
            value = typing._eval_type(value, cls_locals, cls_globals)
            if mapping is not None:
                value = _apply_params(value, mapping)
            hints[name] = value
    return hints
def _wrap_attrs_validators(fields, post_init):
def inner(obj):
for field in fields:
field.validator(obj, field, getattr(obj, field.name))
if post_init is not None:
post_init(obj)
return inner
def get_dataclass_info(obj):
    """Return introspection info for a dataclass or attrs class.

    *obj* may be the class itself or a parametrized generic alias. Returns
    ``(cls, fields, defaults, pre_init, post_init)`` where ``fields`` is a
    tuple of ``(name, type, default_is_factory)`` triples with required
    fields ordered before optional ones, and ``defaults`` aligns with the
    trailing optional fields.
    """
    if isinstance(obj, type):
        cls = obj
    else:
        # Parametrized generic alias, e.g. MyClass[int].
        cls = obj.__origin__
    hints = get_class_annotations(obj)
    required = []
    optional = []
    defaults = []
    if hasattr(cls, "__dataclass_fields__"):
        # --- dataclasses branch ---
        from dataclasses import _FIELD, _FIELD_INITVAR, MISSING

        for field in cls.__dataclass_fields__.values():
            if field._field_type is not _FIELD:
                if field._field_type is _FIELD_INITVAR:
                    raise TypeError(
                        "dataclasses with `InitVar` fields are not supported"
                    )
                # ClassVar and other pseudo-fields are skipped.
                continue
            name = field.name
            typ = hints[name]
            if field.default is not MISSING:
                defaults.append(field.default)
                optional.append((name, typ, False))
            elif field.default_factory is not MISSING:
                # Third element marks "default is a zero-arg factory".
                defaults.append(field.default_factory)
                optional.append((name, typ, True))
            else:
                required.append((name, typ, False))
        required.extend(optional)
        pre_init = None
        post_init = getattr(cls, "__post_init__", None)
    else:
        # --- attrs branch ---
        from attrs import NOTHING, Factory

        fields_with_validators = []
        for field in cls.__attrs_attrs__:
            name = field.name
            typ = hints[name]
            default = field.default
            if default is not NOTHING:
                if isinstance(default, Factory):
                    if default.takes_self:
                        raise NotImplementedError(
                            "Support for default factories with `takes_self=True` "
                            "is not implemented. File a GitHub issue if you need "
                            "this feature!"
                        )
                    defaults.append(default.factory)
                    optional.append((name, typ, True))
                else:
                    defaults.append(default)
                    optional.append((name, typ, False))
            else:
                required.append((name, typ, False))
            if field.validator is not None:
                fields_with_validators.append(field)
        required.extend(optional)
        pre_init = getattr(cls, "__attrs_pre_init__", None)
        post_init = getattr(cls, "__attrs_post_init__", None)
        if fields_with_validators:
            # Run attrs validators as part of post-init.
            post_init = _wrap_attrs_validators(fields_with_validators, post_init)
    return cls, tuple(required), tuple(defaults), pre_init, post_init
15,032 | import collections
import sys
import typing
The provided code snippet includes necessary dependencies for implementing the `rebuild` function. Write a Python function `def rebuild(cls, kwargs)` to solve the following problem:
Used to unpickle Structs with keyword-only fields
Here is the function:
def rebuild(cls, kwargs):
    """Used to unpickle Structs with keyword-only fields.

    Lives at module level so it is itself picklable; simply re-applies the
    saved keyword arguments to the class.
    """
    return cls(**kwargs)
15,033 | import errno
import os
import re
import subprocess
import sys
HANDLERS = {}
The provided code snippet includes necessary dependencies for implementing the `register_vcs_handler` function. Write a Python function `def register_vcs_handler(vcs, method)` to solve the following problem:
Create decorator to mark a method as the handler of a VCS.
Here is the function:
def register_vcs_handler(vcs, method):  # decorator
    """Create decorator to mark a method as the handler of a VCS."""
    def decorate(f):
        """Store f in HANDLERS[vcs][method]."""
        # setdefault creates the per-VCS table on first registration.
        HANDLERS.setdefault(vcs, {})[method] = f
        return f
    return decorate
15,034 | import errno
import os
import re
import subprocess
import sys
The provided code snippet includes necessary dependencies for implementing the `run_command` function. Write a Python function `def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, env=None)` to solve the following problem:
Call the given command(s).
Here is the function:
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, env=None):
    """Call the given command(s).

    Tries each name in *commands* until one can be launched, then runs it
    with *args* and returns ``(stdout, returncode)``. ``stdout`` is the
    stripped, decoded output on success, or ``None`` when the command failed
    or could not be run at all (in which case ``returncode`` is the process's
    exit code, or ``None`` if nothing could be launched).
    """
    assert isinstance(commands, list)
    process = None
    for command in commands:
        dispcmd = str([command] + args)
        try:
            # remember shell=False, so use git.cmd on windows, not just git
            process = subprocess.Popen(
                [command] + args,
                cwd=cwd,
                env=env,
                stdout=subprocess.PIPE,
                stderr=(subprocess.PIPE if hide_stderr else None),
            )
            break
        # Modernized from `except EnvironmentError` + sys.exc_info():
        # EnvironmentError is an alias of OSError on Python 3, and binding
        # with `as e` replaces the py2-era workaround.
        except OSError as e:
            if e.errno == errno.ENOENT:
                # Executable not found: try the next candidate name.
                continue
            if verbose:
                print("unable to run %s" % dispcmd)
                print(e)
            return None, None
    else:
        if verbose:
            print("unable to find command, tried %s" % (commands,))
        return None, None
    stdout = process.communicate()[0].strip().decode()
    if process.returncode != 0:
        if verbose:
            print("unable to run %s (error)" % dispcmd)
            print("stdout was %s" % stdout)
        return None, process.returncode
    return stdout, process.returncode
15,035 | import errno
import os
import re
import subprocess
import sys
The provided code snippet includes necessary dependencies for implementing the `git_get_keywords` function. Write a Python function `def git_get_keywords(versionfile_abs)` to solve the following problem:
Extract version information from the given file.
Here is the function:
def git_get_keywords(versionfile_abs):
    """Extract version information from the given file.

    Scans *versionfile_abs* for the ``git_refnames``/``git_full``/``git_date``
    assignments that git-archive expands, returning whichever were found as a
    dict keyed by ``refnames``/``full``/``date``. Returns an empty dict when
    the file cannot be read.
    """
    # the code embedded in _version.py can just fetch the value of these
    # keywords. When used from setup.py, we don't want to import _version.py,
    # so we do it with a regexp instead. This function is not used from
    # _version.py.
    keywords = {}
    # Map line prefixes to result keys so each line is handled uniformly.
    prefixes = {
        "git_refnames =": "refnames",
        "git_full =": "full",
        "git_date =": "date",
    }
    try:
        # `with` guarantees the handle is closed even if an error occurs
        # mid-read (the original leaked the handle in that case).
        with open(versionfile_abs, "r") as f:
            for line in f.readlines():
                stripped = line.strip()
                for prefix, key in prefixes.items():
                    if stripped.startswith(prefix):
                        mo = re.search(r'=\s*"(.*)"', line)
                        if mo:
                            keywords[key] = mo.group(1)
    except OSError:
        # Missing/unreadable file: fall through to other strategies.
        pass
    return keywords
15,036 | import errno
import os
import re
import subprocess
import sys
def get_keywords():
    """Get the keywords needed to look up the version information."""
    # these strings will be replaced by git during git-archive.
    # setup.py/versioneer.py will grep for the variable names, so they must
    # each be defined on a line of their own. _version.py will just call
    # get_keywords().
    git_refnames = "$Format:%d$"
    git_full = "$Format:%H$"
    git_date = "$Format:%ci$"
    return {"refnames": git_refnames, "full": git_full, "date": git_date}
def get_config():
    """Create, populate and return the VersioneerConfig() object."""
    # these strings are filled in when 'setup.py versioneer' creates
    # _version.py
    settings = {
        "VCS": "git",
        "style": "pep440",
        "tag_prefix": "",
        "parentdir_prefix": "msgspec-",
        "versionfile_source": "msgspec/_version.py",
        "verbose": False,
    }
    cfg = VersioneerConfig()
    for attr, value in settings.items():
        setattr(cfg, attr, value)
    return cfg
# Control-flow exception: each version-discovery strategy raises this so the
# caller can fall through to the next strategy.
class NotThisMethod(Exception):
    """Exception raised if a method is not valid for the current scenario."""
def versions_from_parentdir(parentdir_prefix, root, verbose):
    """Try to determine the version from the parent directory name.

    Source tarballs conventionally unpack into a directory that includes both
    the project name and a version string. We will also support searching up
    two directory levels for an appropriately named parent directory
    """
    tried = []
    for _ in range(3):
        dirname = os.path.basename(root)
        if dirname.startswith(parentdir_prefix):
            # Everything after the prefix is the version string.
            return {
                "version": dirname[len(parentdir_prefix):],
                "full-revisionid": None,
                "dirty": False,
                "error": None,
                "date": None,
            }
        tried.append(root)
        root = os.path.dirname(root)  # up a level
    if verbose:
        print(
            "Tried directories %s but none started with prefix %s"
            % (str(tried), parentdir_prefix)
        )
    raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
date = keywords.get("date")
if date is not None:
# Use only the last line. Previous lines may contain GPG signature
# information.
date = date.splitlines()[-1]
# git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG) :] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r"\d", r)])
if verbose:
print("discarding '%s', no digits" % ",".join(refs - tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix) :]
if verbose:
print("picking %s" % r)
return {
"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False,
"error": None,
"date": date,
}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {
"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False,
"error": "no suitable tags",
"date": None,
}
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root, hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %s not under git control" % root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = run_command(
GITS,
[
"describe",
"--tags",
"--dirty",
"--always",
"--long",
"--match",
"%s*" % tag_prefix,
],
cwd=root,
)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[: git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r"^(.+)-(\d+)-g([0-9a-f]+)$", git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = "unable to parse git-describe output: '%s'" % describe_out
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = "tag '%s' doesn't start with prefix '%s'" % (
full_tag,
tag_prefix,
)
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix) :]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"], cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[
0
].strip()
# Use only the last line. Previous lines may contain GPG signature
# information.
date = date.splitlines()[-1]
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {
"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None,
}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%s'" % style)
return {
"version": rendered,
"full-revisionid": pieces["long"],
"dirty": pieces["dirty"],
"error": None,
"date": pieces.get("date"),
}
The provided code snippet includes necessary dependencies for implementing the `get_versions` function. Write a Python function `def get_versions()` to solve the following problem:
Get version information or return default if unable to do so.
Here is the function:
def get_versions():
"""Get version information or return default if unable to do so."""
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix, verbose)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in cfg.versionfile_source.split("/"):
root = os.path.dirname(root)
except NameError:
return {
"version": "0+unknown",
"full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree",
"date": None,
}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {
"version": "0+unknown",
"full-revisionid": None,
"dirty": None,
"error": "unable to compute version",
"date": None,
} | Get version information or return default if unable to do so. |
15,037 | import datetime as _datetime
from typing import Any, Callable, Optional, Type, TypeVar, Union, overload, Literal
from . import (
DecodeError as _DecodeError,
convert as _convert,
to_builtins as _to_builtins,
)
__all__ = ("encode", "decode")
def __dir__():
return __all__ | null |
15,038 | import datetime as _datetime
from typing import Any, Callable, Optional, Type, TypeVar, Union, overload, Literal
from . import (
DecodeError as _DecodeError,
convert as _convert,
to_builtins as _to_builtins,
)
def _import_tomli_w():
try:
import tomli_w # type: ignore
return tomli_w
except ImportError:
raise ImportError(
"`msgspec.toml.encode` requires `tomli_w` be installed.\n\n"
"Please either `pip` or `conda` install it as follows:\n\n"
" $ python -m pip install tomli_w # using pip\n"
" $ conda install tomli_w # or using conda"
) from None
The provided code snippet includes necessary dependencies for implementing the `encode` function. Write a Python function `def encode( obj: Any, *, enc_hook: Optional[Callable[[Any], Any]] = None, order: Literal[None, "deterministic", "sorted"] = None, ) -> bytes` to solve the following problem:
Serialize an object as TOML. Parameters ---------- obj : Any The object to serialize. enc_hook : callable, optional A callable to call for objects that aren't supported msgspec types. Takes the unsupported object and should return a supported object, or raise a ``NotImplementedError`` if unsupported. order : {None, 'deterministic', 'sorted'}, optional The ordering to use when encoding unordered compound types. - ``None``: All objects are encoded in the most efficient manner matching their in-memory representations. The default. - `'deterministic'`: Unordered collections (sets, dicts) are sorted to ensure a consistent output between runs. Useful when comparison/hashing of the encoded binary output is necessary. - `'sorted'`: Like `'deterministic'`, but *all* object-like types (structs, dataclasses, ...) are also sorted by field name before encoding. This is slower than `'deterministic'`, but may produce more human-readable output. Returns ------- data : bytes The serialized object. See Also -------- decode
Here is the function:
def encode(
obj: Any,
*,
enc_hook: Optional[Callable[[Any], Any]] = None,
order: Literal[None, "deterministic", "sorted"] = None,
) -> bytes:
"""Serialize an object as TOML.
Parameters
----------
obj : Any
The object to serialize.
enc_hook : callable, optional
A callable to call for objects that aren't supported msgspec types.
Takes the unsupported object and should return a supported object, or
raise a ``NotImplementedError`` if unsupported.
order : {None, 'deterministic', 'sorted'}, optional
The ordering to use when encoding unordered compound types.
- ``None``: All objects are encoded in the most efficient manner
matching their in-memory representations. The default.
- `'deterministic'`: Unordered collections (sets, dicts) are sorted to
ensure a consistent output between runs. Useful when
comparison/hashing of the encoded binary output is necessary.
- `'sorted'`: Like `'deterministic'`, but *all* object-like types
(structs, dataclasses, ...) are also sorted by field name before
encoding. This is slower than `'deterministic'`, but may produce more
human-readable output.
Returns
-------
data : bytes
The serialized object.
See Also
--------
decode
"""
toml = _import_tomli_w()
msg = _to_builtins(
obj,
builtin_types=(_datetime.datetime, _datetime.date, _datetime.time),
str_keys=True,
enc_hook=enc_hook,
order=order,
)
return toml.dumps(msg).encode("utf-8") | Serialize an object as TOML. Parameters ---------- obj : Any The object to serialize. enc_hook : callable, optional A callable to call for objects that aren't supported msgspec types. Takes the unsupported object and should return a supported object, or raise a ``NotImplementedError`` if unsupported. order : {None, 'deterministic', 'sorted'}, optional The ordering to use when encoding unordered compound types. - ``None``: All objects are encoded in the most efficient manner matching their in-memory representations. The default. - `'deterministic'`: Unordered collections (sets, dicts) are sorted to ensure a consistent output between runs. Useful when comparison/hashing of the encoded binary output is necessary. - `'sorted'`: Like `'deterministic'`, but *all* object-like types (structs, dataclasses, ...) are also sorted by field name before encoding. This is slower than `'deterministic'`, but may produce more human-readable output. Returns ------- data : bytes The serialized object. See Also -------- decode |
15,039 | import datetime as _datetime
from typing import Any, Callable, Optional, Type, TypeVar, Union, overload, Literal
from . import (
DecodeError as _DecodeError,
convert as _convert,
to_builtins as _to_builtins,
)
def decode(
buf: Union[bytes, str],
*,
strict: bool = True,
dec_hook: Optional[Callable[[type, Any], Any]] = None,
) -> Any:
pass | null |
15,040 | import datetime as _datetime
from typing import Any, Callable, Optional, Type, TypeVar, Union, overload, Literal
from . import (
DecodeError as _DecodeError,
convert as _convert,
to_builtins as _to_builtins,
)
T = TypeVar("T")
def decode(
buf: Union[bytes, str],
*,
type: Type[T] = ...,
strict: bool = True,
dec_hook: Optional[Callable[[type, Any], Any]] = None,
) -> T:
pass | null |
15,041 | import datetime as _datetime
from typing import Any, Callable, Optional, Type, TypeVar, Union, overload, Literal
from . import (
DecodeError as _DecodeError,
convert as _convert,
to_builtins as _to_builtins,
)
def decode(
buf: Union[bytes, str],
*,
type: Any = ...,
strict: bool = True,
dec_hook: Optional[Callable[[type, Any], Any]] = None,
) -> Any:
pass | null |
15,042 | import datetime as _datetime
from typing import Any, Callable, Optional, Type, TypeVar, Union, overload, Literal
from . import (
DecodeError as _DecodeError,
convert as _convert,
to_builtins as _to_builtins,
)
def _import_tomllib():
try:
import tomllib # type: ignore
return tomllib
except ImportError:
pass
try:
import tomli # type: ignore
return tomli
except ImportError:
raise ImportError(
"`msgspec.toml.decode` requires `tomli` be installed.\n\n"
"Please either `pip` or `conda` install it as follows:\n\n"
" $ python -m pip install tomli # using pip\n"
" $ conda install tomli # or using conda"
) from None
The provided code snippet includes necessary dependencies for implementing the `decode` function. Write a Python function `def decode(buf, *, type=Any, strict=True, dec_hook=None)` to solve the following problem:
Deserialize an object from TOML. Parameters ---------- buf : bytes-like or str The message to decode. type : type, optional A Python type (in type annotation form) to decode the object as. If provided, the message will be type checked and decoded as the specified type. Defaults to `Any`, in which case the message will be decoded using the default TOML types. strict : bool, optional Whether type coercion rules should be strict. Setting to False enables a wider set of coercion rules from string to non-string types for all values. Default is True. dec_hook : callable, optional An optional callback for handling decoding custom types. Should have the signature ``dec_hook(type: Type, obj: Any) -> Any``, where ``type`` is the expected message type, and ``obj`` is the decoded representation composed of only basic TOML types. This hook should transform ``obj`` into type ``type``, or raise a ``NotImplementedError`` if unsupported. Returns ------- obj : Any The deserialized object. See Also -------- encode
Here is the function:
def decode(buf, *, type=Any, strict=True, dec_hook=None):
"""Deserialize an object from TOML.
Parameters
----------
buf : bytes-like or str
The message to decode.
type : type, optional
A Python type (in type annotation form) to decode the object as. If
provided, the message will be type checked and decoded as the specified
type. Defaults to `Any`, in which case the message will be decoded
using the default TOML types.
strict : bool, optional
Whether type coercion rules should be strict. Setting to False enables
a wider set of coercion rules from string to non-string types for all
values. Default is True.
dec_hook : callable, optional
An optional callback for handling decoding custom types. Should have
the signature ``dec_hook(type: Type, obj: Any) -> Any``, where ``type``
is the expected message type, and ``obj`` is the decoded representation
composed of only basic TOML types. This hook should transform ``obj``
into type ``type``, or raise a ``NotImplementedError`` if unsupported.
Returns
-------
obj : Any
The deserialized object.
See Also
--------
encode
"""
toml = _import_tomllib()
if isinstance(buf, str):
str_buf = buf
elif isinstance(buf, (bytes, bytearray)):
str_buf = buf.decode("utf-8")
else:
# call `memoryview` first, since `bytes(1)` is actually valid
str_buf = bytes(memoryview(buf)).decode("utf-8")
try:
obj = toml.loads(str_buf)
except toml.TOMLDecodeError as exc:
raise _DecodeError(str(exc)) from None
if type is Any:
return obj
return _convert(
obj,
type,
builtin_types=(_datetime.datetime, _datetime.date, _datetime.time),
str_keys=True,
strict=strict,
dec_hook=dec_hook,
) | Deserialize an object from TOML. Parameters ---------- buf : bytes-like or str The message to decode. type : type, optional A Python type (in type annotation form) to decode the object as. If provided, the message will be type checked and decoded as the specified type. Defaults to `Any`, in which case the message will be decoded using the default TOML types. strict : bool, optional Whether type coercion rules should be strict. Setting to False enables a wider set of coercion rules from string to non-string types for all values. Default is True. dec_hook : callable, optional An optional callback for handling decoding custom types. Should have the signature ``dec_hook(type: Type, obj: Any) -> Any``, where ``type`` is the expected message type, and ``obj`` is the decoded representation composed of only basic TOML types. This hook should transform ``obj`` into type ``type``, or raise a ``NotImplementedError`` if unsupported. Returns ------- obj : Any The deserialized object. See Also -------- encode |
15,043 | from __future__ import annotations
from typing import Any
from . import NODEFAULT, Struct, field
from ._core import ( # noqa
Factory as _Factory,
StructConfig,
asdict,
astuple,
replace,
force_setattr,
)
from ._utils import get_class_annotations as _get_class_annotations
__all__ = (
"FieldInfo",
"StructConfig",
"asdict",
"astuple",
"fields",
"force_setattr",
"replace",
)
def __dir__():
return __all__ | null |
15,044 | from __future__ import annotations
from typing import Any
from . import NODEFAULT, Struct, field
from ._core import ( # noqa
Factory as _Factory,
StructConfig,
asdict,
astuple,
replace,
force_setattr,
)
from ._utils import get_class_annotations as _get_class_annotations
class FieldInfo(Struct):
"""A record describing a field in a struct type.
Parameters
----------
name: str
The field name as seen by Python code (e.g. ``field_one``).
encode_name: str
The name used when encoding/decoding the field. This may differ if
the field is renamed (e.g. ``fieldOne``).
type: Any
The full field type annotation.
default: Any, optional
A default value for the field. Will be `NODEFAULT` if no default value
is set.
default_factory: Any, optional
A callable that creates a default value for the field. Will be
`NODEFAULT` if no ``default_factory`` is set.
"""
name: str
encode_name: str
type: Any
default: Any = field(default_factory=lambda: NODEFAULT)
default_factory: Any = field(default_factory=lambda: NODEFAULT)
def required(self) -> bool:
"""A helper for checking whether a field is required"""
return self.default is NODEFAULT and self.default_factory is NODEFAULT
The provided code snippet includes necessary dependencies for implementing the `fields` function. Write a Python function `def fields(type_or_instance: Struct | type[Struct]) -> tuple[FieldInfo]` to solve the following problem:
Get information about the fields in a Struct. Parameters ---------- type_or_instance: A struct type or instance. Returns ------- tuple[FieldInfo]
Here is the function:
def fields(type_or_instance: Struct | type[Struct]) -> tuple[FieldInfo]:
"""Get information about the fields in a Struct.
Parameters
----------
type_or_instance:
A struct type or instance.
Returns
-------
tuple[FieldInfo]
"""
if isinstance(type_or_instance, Struct):
annotated_cls = cls = type(type_or_instance)
else:
annotated_cls = type_or_instance
cls = getattr(type_or_instance, "__origin__", type_or_instance)
if not (isinstance(cls, type) and issubclass(cls, Struct)):
raise TypeError("Must be called with a struct type or instance")
hints = _get_class_annotations(annotated_cls)
npos = len(cls.__struct_fields__) - len(cls.__struct_defaults__)
fields = []
for name, encode_name, default_obj in zip(
cls.__struct_fields__,
cls.__struct_encode_fields__,
(NODEFAULT,) * npos + cls.__struct_defaults__,
):
default = default_factory = NODEFAULT
if isinstance(default_obj, _Factory):
default_factory = default_obj.factory
elif default_obj is not NODEFAULT:
default = default_obj
field = FieldInfo(
name=name,
encode_name=encode_name,
type=hints[name],
default=default,
default_factory=default_factory,
)
fields.append(field)
return tuple(fields) | Get information about the fields in a Struct. Parameters ---------- type_or_instance: A struct type or instance. Returns ------- tuple[FieldInfo] |
15,047 | from __future__ import annotations
import re
import textwrap
from collections.abc import Iterable
from typing import Any, Optional, Callable
from . import inspect as mi, to_builtins
def schema_components(
types: Iterable[Any],
*,
schema_hook: Optional[Callable[[type], dict[str, Any]]] = None,
ref_template: str = "#/$defs/{name}",
) -> tuple[tuple[dict[str, Any], ...], dict[str, Any]]:
"""Generate JSON Schemas for one or more types.
Any schemas for (potentially) shared components are extracted and returned
in a separate ``components`` dict.
Parameters
----------
types : Iterable[type]
An iterable of one or more types to generate schemas for.
schema_hook : callable, optional
An optional callback to use for generating JSON schemas of custom
types. Will be called with the custom type, and should return a dict
representation of the JSON schema for that type.
ref_template : str, optional
A template to use when generating ``"$ref"`` fields. This template is
formatted with the type name as ``template.format(name=name)``. This
can be useful if you intend to store the ``components`` mapping
somewhere other than a top-level ``"$defs"`` field. For example, you
might use ``ref_template="#/components/{name}"`` if generating an
OpenAPI schema.
Returns
-------
schemas : tuple[dict]
A tuple of JSON Schemas, one for each type in ``types``.
components : dict
A mapping of name to schema for any shared components used by
``schemas``.
See Also
--------
schema
"""
type_infos = mi.multi_type_info(types)
component_types = _collect_component_types(type_infos)
name_map = _build_name_map(component_types)
gen = _SchemaGenerator(name_map, schema_hook, ref_template)
schemas = tuple(gen.to_schema(t) for t in type_infos)
components = {
name_map[cls]: gen.to_schema(t, False) for cls, t in component_types.items()
}
return schemas, components
The provided code snippet includes necessary dependencies for implementing the `schema` function. Write a Python function `def schema( type: Any, *, schema_hook: Optional[Callable[[type], dict[str, Any]]] = None ) -> dict[str, Any]` to solve the following problem:
Generate a JSON Schema for a given type. Any schemas for (potentially) shared components are extracted and stored in a top-level ``"$defs"`` field. If you want to generate schemas for multiple types, or to have more control over the generated schema you may want to use ``schema_components`` instead. Parameters ---------- type : type The type to generate the schema for. schema_hook : callable, optional An optional callback to use for generating JSON schemas of custom types. Will be called with the custom type, and should return a dict representation of the JSON schema for that type. Returns ------- schema : dict The generated JSON Schema. See Also -------- schema_components
Here is the function:
def schema(
type: Any, *, schema_hook: Optional[Callable[[type], dict[str, Any]]] = None
) -> dict[str, Any]:
"""Generate a JSON Schema for a given type.
Any schemas for (potentially) shared components are extracted and stored in
a top-level ``"$defs"`` field.
If you want to generate schemas for multiple types, or to have more control
over the generated schema you may want to use ``schema_components`` instead.
Parameters
----------
type : type
The type to generate the schema for.
schema_hook : callable, optional
An optional callback to use for generating JSON schemas of custom
types. Will be called with the custom type, and should return a dict
representation of the JSON schema for that type.
Returns
-------
schema : dict
The generated JSON Schema.
See Also
--------
schema_components
"""
(out,), components = schema_components((type,), schema_hook=schema_hook)
if components:
out["$defs"] = components
return out | Generate a JSON Schema for a given type. Any schemas for (potentially) shared components are extracted and stored in a top-level ``"$defs"`` field. If you want to generate schemas for multiple types, or to have more control over the generated schema you may want to use ``schema_components`` instead. Parameters ---------- type : type The type to generate the schema for. schema_hook : callable, optional An optional callback to use for generating JSON schemas of custom types. Will be called with the custom type, and should return a dict representation of the JSON schema for that type. Returns ------- schema : dict The generated JSON Schema. See Also -------- schema_components |
15,048 | from __future__ import annotations
import re
import textwrap
from collections.abc import Iterable
from typing import Any, Optional, Callable
from . import inspect as mi, to_builtins
def _get_doc(t: mi.Type) -> str:
assert hasattr(t, "cls")
cls = getattr(t.cls, "__origin__", t.cls)
doc = getattr(cls, "__doc__", "")
if not doc:
return ""
doc = textwrap.dedent(doc).strip("\r\n")
if isinstance(t, mi.EnumType):
if doc == "An enumeration.":
return ""
elif isinstance(t, (mi.NamedTupleType, mi.DataclassType)):
if doc.startswith(f"{cls.__name__}(") and doc.endswith(")"):
return ""
return doc | null |
15,049 | from __future__ import annotations
import datetime
import decimal
import enum
import uuid
from collections.abc import Iterable
from typing import (
Any,
Final,
Literal,
Tuple,
Type as typing_Type,
TypeVar,
Union,
)
import msgspec
from msgspec import NODEFAULT, UNSET, UnsetType as _UnsetType
from ._core import ( # type: ignore
Factory as _Factory,
to_builtins as _to_builtins,
)
from ._utils import ( # type: ignore
_CONCRETE_TYPES,
_AnnotatedAlias,
get_class_annotations as _get_class_annotations,
get_dataclass_info as _get_dataclass_info,
get_typeddict_info as _get_typeddict_info,
)
__all__ = (
"type_info",
"multi_type_info",
"Type",
"Metadata",
"AnyType",
"NoneType",
"BoolType",
"IntType",
"FloatType",
"StrType",
"BytesType",
"ByteArrayType",
"MemoryViewType",
"DateTimeType",
"TimeType",
"DateType",
"TimeDeltaType",
"UUIDType",
"DecimalType",
"ExtType",
"RawType",
"EnumType",
"LiteralType",
"CustomType",
"UnionType",
"CollectionType",
"ListType",
"SetType",
"FrozenSetType",
"VarTupleType",
"TupleType",
"DictType",
"Field",
"TypedDictType",
"NamedTupleType",
"DataclassType",
"StructType",
)
def __dir__():
return __all__ | null |
15,050 | from __future__ import annotations
import datetime
import decimal
import enum
import uuid
from collections.abc import Iterable
from typing import (
Any,
Final,
Literal,
Tuple,
Type as typing_Type,
TypeVar,
Union,
)
import msgspec
from msgspec import NODEFAULT, UNSET, UnsetType as _UnsetType
from ._core import ( # type: ignore
Factory as _Factory,
to_builtins as _to_builtins,
)
from ._utils import ( # type: ignore
_CONCRETE_TYPES,
_AnnotatedAlias,
get_class_annotations as _get_class_annotations,
get_dataclass_info as _get_dataclass_info,
get_typeddict_info as _get_typeddict_info,
)
class Type(msgspec.Struct):
"""The base Type."""
def multi_type_info(types: Iterable[Any]) -> tuple[Type, ...]:
"""Get information about multiple msgspec-compatible types.
Parameters
----------
types: an iterable of types
The types to get info about.
Returns
-------
tuple[Type, ...]
Examples
--------
>>> msgspec.inspect.multi_type_info([int, float, list[str]]) # doctest: +NORMALIZE_WHITESPACE
(IntType(gt=None, ge=None, lt=None, le=None, multiple_of=None),
FloatType(gt=None, ge=None, lt=None, le=None, multiple_of=None),
ListType(item_type=StrType(min_length=None, max_length=None, pattern=None),
min_length=None, max_length=None))
"""
return _Translator(types).run()
The provided code snippet includes necessary dependencies for implementing the `type_info` function. Write a Python function `def type_info(type: Any) -> Type` to solve the following problem:
Get information about a msgspec-compatible type. Note that if you need to inspect multiple types it's more efficient to call `multi_type_info` once with a sequence of types than calling `type_info` multiple times. Parameters ---------- type: type The type to get info about. Returns ------- Type Examples -------- >>> msgspec.inspect.type_info(bool) BoolType() >>> msgspec.inspect.type_info(int) IntType(gt=None, ge=None, lt=None, le=None, multiple_of=None) >>> msgspec.inspect.type_info(list[int]) # doctest: +NORMALIZE_WHITESPACE ListType(item_type=IntType(gt=None, ge=None, lt=None, le=None, multiple_of=None), min_length=None, max_length=None)
Here is the function:
def type_info(type: Any) -> Type:
"""Get information about a msgspec-compatible type.
Note that if you need to inspect multiple types it's more efficient to call
`multi_type_info` once with a sequence of types than calling `type_info`
multiple times.
Parameters
----------
type: type
The type to get info about.
Returns
-------
Type
Examples
--------
>>> msgspec.inspect.type_info(bool)
BoolType()
>>> msgspec.inspect.type_info(int)
IntType(gt=None, ge=None, lt=None, le=None, multiple_of=None)
>>> msgspec.inspect.type_info(list[int]) # doctest: +NORMALIZE_WHITESPACE
ListType(item_type=IntType(gt=None, ge=None, lt=None, le=None, multiple_of=None),
min_length=None, max_length=None)
"""
return multi_type_info([type])[0] | Get information about a msgspec-compatible type. Note that if you need to inspect multiple types it's more efficient to call `multi_type_info` once with a sequence of types than calling `type_info` multiple times. Parameters ---------- type: type The type to get info about. Returns ------- Type Examples -------- >>> msgspec.inspect.type_info(bool) BoolType() >>> msgspec.inspect.type_info(int) IntType(gt=None, ge=None, lt=None, le=None, multiple_of=None) >>> msgspec.inspect.type_info(list[int]) # doctest: +NORMALIZE_WHITESPACE ListType(item_type=IntType(gt=None, ge=None, lt=None, le=None, multiple_of=None), min_length=None, max_length=None) |
15,051 | from __future__ import annotations
import datetime
import decimal
import enum
import uuid
from collections.abc import Iterable
from typing import (
Any,
Final,
Literal,
Tuple,
Type as typing_Type,
TypeVar,
Union,
)
import msgspec
from msgspec import NODEFAULT, UNSET, UnsetType as _UnsetType
from ._core import ( # type: ignore
Factory as _Factory,
to_builtins as _to_builtins,
)
from ._utils import ( # type: ignore
_CONCRETE_TYPES,
_AnnotatedAlias,
get_class_annotations as _get_class_annotations,
get_dataclass_info as _get_dataclass_info,
get_typeddict_info as _get_typeddict_info,
)
_CONCRETE_TYPES = {
list: list,
tuple: tuple,
set: set,
frozenset: frozenset,
dict: dict,
typing.List: list,
typing.Tuple: tuple,
typing.Set: set,
typing.FrozenSet: frozenset,
typing.Dict: dict,
typing.Collection: list,
typing.MutableSequence: list,
typing.Sequence: list,
typing.MutableMapping: dict,
typing.Mapping: dict,
typing.MutableSet: set,
typing.AbstractSet: set,
collections.abc.Collection: list,
collections.abc.MutableSequence: list,
collections.abc.Sequence: list,
collections.abc.MutableSet: set,
collections.abc.Set: set,
collections.abc.MutableMapping: dict,
collections.abc.Mapping: dict,
}
def _origin_args_metadata(t):
    """Decompose type *t* into ``(origin, args, metadata)``.

    Repeatedly strips wrapper types (``Annotated``, ``NewType``, ``Final``,
    PEP 695 type aliases) until a concrete type is reached, collecting any
    ``msgspec.Meta`` annotations encountered along the way.

    Returns
    -------
    origin : type
        The concrete (or generic-origin) type after unwrapping. PEP 604
        unions (``int | str``) are normalized to ``typing.Union``.
    args : tuple or None
        The generic type arguments, or ``None`` for a bare type.
    metadata : tuple
        ``msgspec.Meta`` constraints gathered from ``Annotated`` wrappers,
        outermost first.
    """
    # Strip wrappers (Annotated, NewType, Final) until we hit a concrete type
    metadata = []
    while True:
        try:
            origin = _CONCRETE_TYPES.get(t)
        except TypeError:
            # t is not hashable
            origin = None
        if origin is not None:
            # Bare concrete container type (e.g. `list`, `typing.Dict`)
            args = None
            break
        origin = getattr(t, "__origin__", None)
        if origin is not None:
            if type(t) is _AnnotatedAlias:
                # Keep only msgspec.Meta constraints; other Annotated
                # metadata is ignored.
                metadata.extend(m for m in t.__metadata__ if type(m) is msgspec.Meta)
                t = origin
            elif origin == Final:
                # Final[X] -> unwrap to X
                t = t.__args__[0]
            elif type(origin) is _TypeAliasType:
                # Parametrized PEP 695 alias: substitute args into its value.
                # NOTE(review): _TypeAliasType is not defined in this excerpt
                # — confirm the import in the full module.
                t = origin.__value__[t.__args__]
            else:
                # Parametrized generic: normalize origin to a concrete type
                args = getattr(t, "__args__", None)
                origin = _CONCRETE_TYPES.get(origin, origin)
                break
        else:
            supertype = getattr(t, "__supertype__", None)
            if supertype is not None:
                # NewType: unwrap to the underlying type
                t = supertype
            elif type(t) is _TypeAliasType:
                # Bare PEP 695 alias: unwrap to its value
                t = t.__value__
            else:
                # Nothing left to strip — t itself is the origin
                origin = t
                args = None
                break
    if type(origin) is _types_UnionType:
        # PEP 604 union (`int | str`): normalize to typing.Union.
        # NOTE(review): _types_UnionType is not defined in this excerpt —
        # presumably `types.UnionType`; confirm in the full module.
        args = origin.__args__
        origin = Union
    return origin, args, tuple(metadata)
15,052 | from __future__ import annotations
import datetime
import decimal
import enum
import uuid
from collections.abc import Iterable
from typing import (
Any,
Final,
Literal,
Tuple,
Type as typing_Type,
TypeVar,
Union,
)
import msgspec
from msgspec import NODEFAULT, UNSET, UnsetType as _UnsetType
from ._core import ( # type: ignore
Factory as _Factory,
to_builtins as _to_builtins,
)
from ._utils import ( # type: ignore
_CONCRETE_TYPES,
_AnnotatedAlias,
get_class_annotations as _get_class_annotations,
get_dataclass_info as _get_dataclass_info,
get_typeddict_info as _get_typeddict_info,
)
def _is_struct(t):
    """Return True if *t* is a ``msgspec.Struct`` type.

    Checks the exact metaclass, so only classes created through the Struct
    machinery qualify.
    """
    struct_metaclass = type(msgspec.Struct)
    return type(t) is struct_metaclass
15,053 | from __future__ import annotations
import datetime
import decimal
import enum
import uuid
from collections.abc import Iterable
from typing import (
Any,
Final,
Literal,
Tuple,
Type as typing_Type,
TypeVar,
Union,
)
import msgspec
from msgspec import NODEFAULT, UNSET, UnsetType as _UnsetType
from ._core import ( # type: ignore
Factory as _Factory,
to_builtins as _to_builtins,
)
from ._utils import ( # type: ignore
_CONCRETE_TYPES,
_AnnotatedAlias,
get_class_annotations as _get_class_annotations,
get_dataclass_info as _get_dataclass_info,
get_typeddict_info as _get_typeddict_info,
)
def _is_enum(t):
return type(t) is enum.EnumMeta | null |
15,054 | from __future__ import annotations
import datetime
import decimal
import enum
import uuid
from collections.abc import Iterable
from typing import (
Any,
Final,
Literal,
Tuple,
Type as typing_Type,
TypeVar,
Union,
)
import msgspec
from msgspec import NODEFAULT, UNSET, UnsetType as _UnsetType
from ._core import ( # type: ignore
Factory as _Factory,
to_builtins as _to_builtins,
)
from ._utils import ( # type: ignore
_CONCRETE_TYPES,
_AnnotatedAlias,
get_class_annotations as _get_class_annotations,
get_dataclass_info as _get_dataclass_info,
get_typeddict_info as _get_typeddict_info,
)
def _is_dataclass(t):
return hasattr(t, "__dataclass_fields__") | null |
15,055 | from __future__ import annotations
import datetime
import decimal
import enum
import uuid
from collections.abc import Iterable
from typing import (
Any,
Final,
Literal,
Tuple,
Type as typing_Type,
TypeVar,
Union,
)
import msgspec
from msgspec import NODEFAULT, UNSET, UnsetType as _UnsetType
from ._core import ( # type: ignore
Factory as _Factory,
to_builtins as _to_builtins,
)
from ._utils import ( # type: ignore
_CONCRETE_TYPES,
_AnnotatedAlias,
get_class_annotations as _get_class_annotations,
get_dataclass_info as _get_dataclass_info,
get_typeddict_info as _get_typeddict_info,
)
def _is_attrs(t):
return hasattr(t, "__attrs_attrs__") | null |
15,056 | from __future__ import annotations
import datetime
import decimal
import enum
import uuid
from collections.abc import Iterable
from typing import (
Any,
Final,
Literal,
Tuple,
Type as typing_Type,
TypeVar,
Union,
)
import msgspec
from msgspec import NODEFAULT, UNSET, UnsetType as _UnsetType
from ._core import ( # type: ignore
Factory as _Factory,
to_builtins as _to_builtins,
)
from ._utils import ( # type: ignore
_CONCRETE_TYPES,
_AnnotatedAlias,
get_class_annotations as _get_class_annotations,
get_dataclass_info as _get_dataclass_info,
get_typeddict_info as _get_typeddict_info,
)
def _is_typeddict(t):
try:
return issubclass(t, dict) and hasattr(t, "__total__")
except TypeError:
return False | null |
15,057 | from __future__ import annotations
import datetime
import decimal
import enum
import uuid
from collections.abc import Iterable
from typing import (
Any,
Final,
Literal,
Tuple,
Type as typing_Type,
TypeVar,
Union,
)
import msgspec
from msgspec import NODEFAULT, UNSET, UnsetType as _UnsetType
from ._core import ( # type: ignore
Factory as _Factory,
to_builtins as _to_builtins,
)
from ._utils import ( # type: ignore
_CONCRETE_TYPES,
_AnnotatedAlias,
get_class_annotations as _get_class_annotations,
get_dataclass_info as _get_dataclass_info,
get_typeddict_info as _get_typeddict_info,
)
def _is_namedtuple(t):
try:
return issubclass(t, tuple) and hasattr(t, "_fields")
except TypeError:
return False | null |
15,058 | from __future__ import annotations
import datetime
import decimal
import enum
import uuid
from collections.abc import Iterable
from typing import (
Any,
Final,
Literal,
Tuple,
Type as typing_Type,
TypeVar,
Union,
)
import msgspec
from msgspec import NODEFAULT, UNSET, UnsetType as _UnsetType
from ._core import ( # type: ignore
Factory as _Factory,
to_builtins as _to_builtins,
)
from ._utils import ( # type: ignore
_CONCRETE_TYPES,
_AnnotatedAlias,
get_class_annotations as _get_class_annotations,
get_dataclass_info as _get_dataclass_info,
get_typeddict_info as _get_typeddict_info,
)
def _merge_json(a, b):
if b:
a = a.copy()
for key, b_val in b.items():
if key in a:
a_val = a[key]
if isinstance(a_val, dict) and isinstance(b_val, dict):
a[key] = _merge_json(a_val, b_val)
elif isinstance(a_val, (list, tuple)) and isinstance(
b_val, (list, tuple)
):
a[key] = list(a_val) + list(b_val)
else:
a[key] = b_val
else:
a[key] = b_val
return a | null |
15,060 | import datetime as _datetime
from typing import Any, Callable, Optional, Type, TypeVar, Union, overload, Literal
from . import (
DecodeError as _DecodeError,
convert as _convert,
to_builtins as _to_builtins,
)
def _import_pyyaml(name):
try:
import yaml # type: ignore
except ImportError:
raise ImportError(
f"`msgspec.yaml.{name}` requires PyYAML be installed.\n\n"
"Please either `pip` or `conda` install it as follows:\n\n"
" $ python -m pip install pyyaml # using pip\n"
" $ conda install pyyaml # or using conda"
) from None
else:
return yaml
The provided code snippet includes necessary dependencies for implementing the `encode` function. Write a Python function `def encode( obj: Any, *, enc_hook: Optional[Callable[[Any], Any]] = None, order: Literal[None, "deterministic", "sorted"] = None, ) -> bytes` to solve the following problem:
Serialize an object as YAML. Parameters ---------- obj : Any The object to serialize. enc_hook : callable, optional A callable to call for objects that aren't supported msgspec types. Takes the unsupported object and should return a supported object, or raise a ``NotImplementedError`` if unsupported. order : {None, 'deterministic', 'sorted'}, optional The ordering to use when encoding unordered compound types. - ``None``: All objects are encoded in the most efficient manner matching their in-memory representations. The default. - `'deterministic'`: Unordered collections (sets, dicts) are sorted to ensure a consistent output between runs. Useful when comparison/hashing of the encoded binary output is necessary. - `'sorted'`: Like `'deterministic'`, but *all* object-like types (structs, dataclasses, ...) are also sorted by field name before encoding. This is slower than `'deterministic'`, but may produce more human-readable output. Returns ------- data : bytes The serialized object. Notes ----- This function requires that the third-party `PyYAML library <https://pyyaml.org/>`_ is installed. See Also -------- decode
Here is the function:
def encode(
    obj: Any,
    *,
    enc_hook: Optional[Callable[[Any], Any]] = None,
    order: Literal[None, "deterministic", "sorted"] = None,
) -> bytes:
    """Serialize *obj* to YAML, returning the encoded bytes.

    Parameters
    ----------
    obj : Any
        The object to serialize.
    enc_hook : callable, optional
        Called for objects that aren't natively supported; must return a
        supported object or raise ``NotImplementedError``.
    order : {None, 'deterministic', 'sorted'}, optional
        Ordering used for unordered compound types:

        - ``None``: encode in the most efficient in-memory order (default).
        - ``'deterministic'``: sort unordered collections (sets, dicts) for
          reproducible output across runs.
        - ``'sorted'``: additionally sort object-like types (structs,
          dataclasses, ...) by field name; slower, more human-readable.

    Returns
    -------
    data : bytes
        The YAML-encoded message.

    Notes
    -----
    Requires the third-party `PyYAML library <https://pyyaml.org/>`_.

    See Also
    --------
    decode
    """
    yaml = _import_pyyaml("encode")
    # Prefer the C-accelerated dumper when PyYAML was built with libyaml
    dumper = getattr(yaml, "CSafeDumper", yaml.SafeDumper)
    # Reduce obj to builtin types first; datetimes/dates pass through since
    # YAML has native representations for them.
    builtins_obj = _to_builtins(
        obj,
        builtin_types=(_datetime.datetime, _datetime.date),
        enc_hook=enc_hook,
        order=order,
    )
    return yaml.dump_all(
        [builtins_obj],
        encoding="utf-8",
        Dumper=dumper,
        allow_unicode=True,
        sort_keys=False,
    )
15,064 | import datetime as _datetime
from typing import Any, Callable, Optional, Type, TypeVar, Union, overload, Literal
from . import (
DecodeError as _DecodeError,
convert as _convert,
to_builtins as _to_builtins,
)
def _import_pyyaml(name):
try:
import yaml # type: ignore
except ImportError:
raise ImportError(
f"`msgspec.yaml.{name}` requires PyYAML be installed.\n\n"
"Please either `pip` or `conda` install it as follows:\n\n"
" $ python -m pip install pyyaml # using pip\n"
" $ conda install pyyaml # or using conda"
) from None
else:
return yaml
The provided code snippet includes necessary dependencies for implementing the `decode` function. Write a Python function `def decode(buf, *, type=Any, strict=True, dec_hook=None)` to solve the following problem:
Deserialize an object from YAML. Parameters ---------- buf : bytes-like or str The message to decode. type : type, optional A Python type (in type annotation form) to decode the object as. If provided, the message will be type checked and decoded as the specified type. Defaults to `Any`, in which case the message will be decoded using the default YAML types. strict : bool, optional Whether type coercion rules should be strict. Setting to False enables a wider set of coercion rules from string to non-string types for all values. Default is True. dec_hook : callable, optional An optional callback for handling decoding custom types. Should have the signature ``dec_hook(type: Type, obj: Any) -> Any``, where ``type`` is the expected message type, and ``obj`` is the decoded representation composed of only basic YAML types. This hook should transform ``obj`` into type ``type``, or raise a ``NotImplementedError`` if unsupported. Returns ------- obj : Any The deserialized object. Notes ----- This function requires that the third-party `PyYAML library <https://pyyaml.org/>`_ is installed. See Also -------- encode
Here is the function:
def decode(buf, *, type=Any, strict=True, dec_hook=None):
    """Deserialize a YAML message, optionally validating against *type*.

    Parameters
    ----------
    buf : bytes-like or str
        The message to decode.
    type : type, optional
        A Python type (in annotation form) to type-check and decode the
        message as. Defaults to ``Any`` (plain YAML types, no conversion).
    strict : bool, optional
        Whether type coercion rules are strict. ``False`` enables wider
        string-to-non-string coercions. Default ``True``.
    dec_hook : callable, optional
        Callback ``dec_hook(type, obj)`` for decoding custom types: given
        the expected type and the basic-YAML representation, return the
        converted value or raise ``NotImplementedError``.

    Returns
    -------
    obj : Any
        The deserialized object.

    Notes
    -----
    Requires the third-party `PyYAML library <https://pyyaml.org/>`_.

    See Also
    --------
    encode
    """
    yaml = _import_pyyaml("decode")
    # Prefer the C-accelerated loader when PyYAML was built with libyaml
    loader = getattr(yaml, "CSafeLoader", yaml.SafeLoader)
    if isinstance(buf, (str, bytes)):
        data = buf
    else:
        # call `memoryview` first, since `bytes(1)` is actually valid
        data = bytes(memoryview(buf))
    try:
        obj = yaml.load(data, loader)
    except yaml.YAMLError as exc:
        raise _DecodeError(str(exc)) from None
    if type is Any:
        return obj
    # Convert/validate the plain-YAML object into the requested type;
    # datetimes/dates are already concrete thanks to YAML's native support.
    return _convert(
        obj,
        type,
        builtin_types=(_datetime.datetime, _datetime.date),
        strict=strict,
        dec_hook=dec_hook,
    )
15,065 | import math
import os
import textwrap
n_shifts, shifts, n_powers, powers = gen_hpd_tables()
def gen_hpd_tables():
    """Generate the high-precision-decimal shift/power tables.

    For each left-shift amount 1..60, packs the number of new decimal digits
    (upper 5 bits) and an offset into the digit table (lower 11 bits) into a
    16-bit code; the digit table holds the decimal digits of 5**shift,
    one character per entry. Shifts 61..64 store the table length only.

    Returns
    -------
    (n_shifts, shifts_str, n_powers, powers_str) : tuple
        Entry counts plus comma-joined, 78-column-wrapped table bodies
        ready to paste into C source.
    """
    log2log10 = math.log(2) / math.log(10)
    shifts = ["0x0000"]
    digits = []
    for shift in range(1, 61):
        offset = len(digits)
        assert offset <= 0x07FF  # must fit in the 11-bit offset field
        new_digits = int(log2log10 * float(shift)) + 1
        assert new_digits <= 31  # must fit in the 5-bit count field
        digits.extend(str(5**shift))  # one table entry per decimal digit
        shifts.append("0x%04X" % ((new_digits << 11) | offset))
    for shift in range(61, 65):
        shifts.append("0x%04X" % len(digits))
    total_shifts = len(shifts)
    total_digits = len(digits)
    assert total_digits <= 0x07FF
    shifts_str = "\n".join(textwrap.wrap(", ".join(shifts), width=78))
    digits_str = "\n".join(textwrap.wrap(", ".join(digits), width=78))
    return total_shifts, shifts_str, total_digits, digits_str
15,066 | import math
import os
import textwrap
def gen_row(e):
    """Format one C table row for the 128-bit approximation of 10**e.

    Computes the top 128 bits of 10**e (scaled by 2**2048 so negative
    exponents keep full precision) and emits it as a ``{lo64, hi64}``
    initializer with a ``// 1e<e>`` comment. Asserts that the exponent
    produced matches the closed-form approximation used by the C decoder.
    """
    if e >= 0:
        scaled = (1 << 2048) * 10**e
    else:
        scaled = (1 << 2048) // 10 ** (-e)
    # Normalize to exactly 128 significant bits, tracking the binary exponent
    exponent = -2048
    while scaled >= (1 << 128):
        scaled >>= 1
        exponent += 1
    digits = hex(scaled)[2:]
    assert len(digits) == 32  # exactly 128 bits -> 32 hex digits
    # The C code derives the exponent as ((217706*e)>>16)+1087; verify the
    # table agrees with that approximation for this row.
    assert ((217706 * e) >> 16) + 1087 == 1214 + exponent
    return "{0x%s, 0x%s}, // 1e%-04d" % (digits[16:], digits[:16], e)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.