id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
19,978 | import torch
from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
def is_npu_available():
    """Check whether an Ascend NPU backend can be used.

    Returns:
        bool: True when ``torch`` exposes an ``npu`` module and it reports
            an available device, False otherwise.
    """
    has_npu_module = hasattr(torch, 'npu')
    return has_npu_module and torch.npu.is_available()
The provided code snippet includes necessary dependencies for implementing the `get_device` function. Write a Python function `def get_device()` to solve the following problem:
Returns an available device, cpu, cuda or npu.
Here is the function:
def get_device():
    """Returns an available device, cpu, cuda or npu."""
    # Probe accelerators in priority order (NPU before CUDA), falling
    # back to the CPU when neither backend is available.
    if is_npu_available():
        return 'npu'
    if torch.cuda.is_available():
        return 'cuda'
    return 'cpu'
19,979 | import logging
from mmcv.utils import get_logger
The provided code snippet includes necessary dependencies for implementing the `get_root_logger` function. Write a Python function `def get_root_logger(log_file=None, log_level=logging.INFO)` to solve the following problem:
Get root logger. Args: log_file (str): File path of log. Defaults to None. log_level (int): The level of logger. Defaults to logging.INFO. Returns: :obj:`logging.Logger`: The obtained logger
Here is the function:
def get_root_logger(log_file=None, log_level=logging.INFO):
    """Fetch the root logger used across mmtracking.

    Args:
        log_file (str): File path of log. Defaults to None.
        log_level (int): The level of logger. Defaults to logging.INFO.

    Returns:
        :obj:`logging.Logger`: The obtained logger
    """
    # All mmtracking modules share one logger namespace.
    logger_name = 'mmtrack'
    return get_logger(logger_name, log_file, log_level)
19,980 | import argparse
import copy
import os
import os.path as osp
import time
import warnings
import mmcv
import torch
import torch.distributed as dist
from mmcv import Config, DictAction
from mmcv.runner import get_dist_info, init_dist
from mmdet.apis import set_random_seed
from mmtrack import __version__
from mmtrack.apis import init_random_seed
from mmtrack.core import setup_multi_processes
from mmtrack.datasets import build_dataset
from mmtrack.utils import collect_env, get_device, get_root_logger
def parse_args():
    """Parse command-line options for the training entry point."""
    cli = argparse.ArgumentParser(description='Train a model')
    cli.add_argument('config', help='train config file path')
    cli.add_argument('--work-dir', help='the dir to save logs and models')
    cli.add_argument(
        '--resume-from', help='the checkpoint file to resume from')
    cli.add_argument(
        '--no-validate',
        action='store_true',
        help='whether not to evaluate the checkpoint during training')
    # Exactly one of --gpus / --gpu-ids / --gpu-id may be supplied.
    gpu_group = cli.add_mutually_exclusive_group()
    gpu_group.add_argument(
        '--gpus',
        type=int,
        help='(Deprecated, please use --gpu-id) number of gpus to use '
        '(only applicable to non-distributed training)')
    gpu_group.add_argument(
        '--gpu-ids',
        type=int,
        nargs='+',
        help='(Deprecated, please use --gpu-id) ids of gpus to use '
        '(only applicable to non-distributed training)')
    gpu_group.add_argument(
        '--gpu-id',
        type=int,
        default=0,
        help='id of gpu to use '
        '(only applicable to non-distributed training)')
    cli.add_argument('--seed', type=int, default=None, help='random seed')
    cli.add_argument(
        '--diff_seed',
        action='store_true',
        help='Whether or not set different seeds for different ranks')
    cli.add_argument(
        '--deterministic',
        action='store_true',
        help='whether to set deterministic options for CUDNN backend.')
    cli.add_argument(
        '--cfg-options',
        nargs='+',
        action=DictAction,
        help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into config file.')
    cli.add_argument(
        '--launcher',
        choices=['none', 'pytorch', 'slurm', 'mpi'],
        default='none',
        help='job launcher')
    cli.add_argument('--local_rank', type=int, default=0)
    args = cli.parse_args()
    # Expose the local rank to torch.distributed launch utilities when the
    # launcher did not already set it.
    os.environ.setdefault('LOCAL_RANK', str(args.local_rank))
    return args
19,981 | from argparse import ArgumentParser, Namespace
from pathlib import Path
from tempfile import TemporaryDirectory
import mmcv
The provided code snippet includes necessary dependencies for implementing the `mmtrack2torchserve` function. Write a Python function `def mmtrack2torchserve( config_file: str, checkpoint_file: str, output_folder: str, model_name: str, model_version: str = '1.0', force: bool = False, )` to solve the following problem:
Converts mmtracking model (config + checkpoint) to TorchServe `.mar`. Args: config_file (str): In MMTracking config format. The contents vary for each task repository. checkpoint_file (str): In MMTracking checkpoint format. The contents vary for each task repository. output_folder (str): Folder where `{model_name}.mar` will be created. The file created will be in TorchServe archive format. model_name (str): If not None, used for naming the `{model_name}.mar` file that will be created under `output_folder`. If None, `{Path(checkpoint_file).stem}` will be used. model_version (int): Model's version. force (bool): If True, if there is an existing `{model_name}.mar` file under `output_folder` it will be overwritten.
Here is the function:
def mmtrack2torchserve(
    config_file: str,
    checkpoint_file: str,
    output_folder: str,
    model_name: str,
    model_version: str = '1.0',
    force: bool = False,
):
    """Converts mmtracking model (config + checkpoint) to TorchServe `.mar`.

    Args:
        config_file (str):
            In MMTracking config format.
            The contents vary for each task repository.
        checkpoint_file (str):
            In MMTracking checkpoint format.
            The contents vary for each task repository.
        output_folder (str):
            Folder where `{model_name}.mar` will be created.
            The file created will be in TorchServe archive format.
        model_name (str):
            If not None, used for naming the `{model_name}.mar` file
            that will be created under `output_folder`.
            If None, `{Path(checkpoint_file).stem}` will be used.
        model_version (str):
            Model's version. Defaults to '1.0'.
        force (bool):
            If True, if there is an existing `{model_name}.mar`
            file under `output_folder` it will be overwritten.
    """
    mmcv.mkdir_or_exist(output_folder)
    config = mmcv.Config.fromfile(config_file)
    with TemporaryDirectory() as tmpdir:
        # Dump the resolved config next to the checkpoint so the archive is
        # self-contained.
        config.dump(f'{tmpdir}/config.py')
        # Mimic the CLI namespace expected by torch-model-archiver.
        args = Namespace(
            **{
                'model_file': f'{tmpdir}/config.py',
                'serialized_file': checkpoint_file,
                'handler': f'{Path(__file__).parent}/mmtrack_handler.py',
                'model_name': model_name or Path(checkpoint_file).stem,
                'version': model_version,
                'export_path': output_folder,
                'force': force,
                'requirements_file': None,
                'extra_files': None,
                'runtime': 'python',
                'archive_format': 'default'
            })
        # NOTE(review): ModelExportUtils and package_model presumably come
        # from torch-model-archiver and are imported elsewhere in this
        # module — confirm; they are not imported in this chunk.
        manifest = ModelExportUtils.generate_manifest_json(args)
        package_model(args, manifest)
19,982 | from argparse import ArgumentParser, Namespace
from pathlib import Path
from tempfile import TemporaryDirectory
import mmcv
def parse_args():
    """Parse CLI options for converting mmtrack models to TorchServe."""
    cli = ArgumentParser(
        description='Convert mmtrack models to TorchServe `.mar` format.')
    cli.add_argument('config', type=str, help='config file path')
    cli.add_argument('checkpoint', type=str, help='checkpoint file path')
    cli.add_argument(
        '--output-folder',
        type=str,
        required=True,
        help='Folder where `{model_name}.mar` will be created.')
    cli.add_argument(
        '--model-name',
        type=str,
        default=None,
        help='If not None, used for naming the `{model_name}.mar`'
        'file that will be created under `output_folder`.'
        'If None, `{Path(checkpoint_file).stem}` will be used.')
    cli.add_argument(
        '--model-version',
        type=str,
        default='1.0',
        help='Number used for versioning.')
    cli.add_argument(
        '-f',
        '--force',
        action='store_true',
        help='overwrite the existing `{model_name}.mar`')
    return cli.parse_args()
19,983 | import argparse
import os
import os.path as osp
from collections import defaultdict
import mmcv
from tqdm import tqdm
def parse_args():
    """Build and run the CLI parser for UAV123 -> COCO-Video conversion."""
    cli = argparse.ArgumentParser(
        description='UAV123 dataset to COCO Video format')
    cli.add_argument('-i', '--input', help='root directory of UAV123 dataset')
    cli.add_argument(
        '-o', '--output', help='directory to save coco formatted label file')
    return cli.parse_args()
19,984 | import argparse
import os
import os.path as osp
from collections import defaultdict
import mmcv
from tqdm import tqdm
The provided code snippet includes necessary dependencies for implementing the `convert_uav123` function. Write a Python function `def convert_uav123(uav123, ann_dir, save_dir)` to solve the following problem:
Convert UAV123 dataset to COCO style. Args: uav123 (dict): The converted COCO style annotations. ann_dir (str): The path of the UAV123 dataset. save_dir (str): The path to save `uav123`.
Here is the function:
def convert_uav123(uav123, ann_dir, save_dir):
    """Convert UAV123 dataset to COCO style.

    Args:
        uav123 (dict): The converted COCO style annotations.
        ann_dir (str): The root path of the UAV123 dataset.
        save_dir (str): The path to save `uav123`.
    """
    # Each line of "uav123_info_deprecated.txt" is
    # "anno_name,anno_path,video_path,start_frame,end_frame"
    info_path = osp.join(
        os.path.dirname(__file__), 'uav123_info_deprecated.txt')
    # Skip the header line of the info file.
    uav_info = mmcv.list_from_file(info_path)[1:]
    # Running 1-based counters used for the COCO-style ids.
    records = dict(vid_id=1, img_id=1, ann_id=1, global_instance_id=1)
    # Single-object tracking: one dummy category.
    uav123['categories'] = [dict(id=0, name=0)]
    for info in tqdm(uav_info):
        anno_name, anno_path, video_path, start_frame, end_frame = info.split(
            ',')
        start_frame = int(start_frame)
        end_frame = int(end_frame)
        # video_name is not the same as anno_name since one video may have
        # several fragments.
        # Example: video_name: "bird" anno_name: "bird_1"
        video_name = video_path.split(os.sep)[-1]
        video = dict(id=records['vid_id'], name=video_name)
        uav123['videos'].append(video)
        gt_bboxes = mmcv.list_from_file(osp.join(ann_dir, anno_path))
        # One annotation line per frame in [start_frame, end_frame].
        assert len(gt_bboxes) == end_frame - start_frame + 1
        # Read one frame to obtain the (constant) image resolution.
        img = mmcv.imread(
            osp.join(ann_dir, video_path, '%06d.jpg' % (start_frame)))
        height, width, _ = img.shape
        for frame_id, src_frame_id in enumerate(
                range(start_frame, end_frame + 1)):
            file_name = osp.join(video_name, '%06d.jpg' % (src_frame_id))
            image = dict(
                file_name=file_name,
                height=height,
                width=width,
                id=records['img_id'],
                frame_id=frame_id,
                video_id=records['vid_id'])
            uav123['images'].append(image)
            # 'NaN' marks frames where the target box is invalid; use a
            # zero box for those frames.
            if 'NaN' in gt_bboxes[frame_id]:
                x1 = y1 = w = h = 0
            else:
                x1, y1, w, h = gt_bboxes[frame_id].split(',')
            ann = dict(
                id=records['ann_id'],
                video_id=records['vid_id'],
                image_id=records['img_id'],
                instance_id=records['global_instance_id'],
                category_id=0,
                bbox=[int(x1), int(y1), int(w),
                      int(h)],
                area=int(w) * int(h))
            uav123['annotations'].append(ann)
            records['ann_id'] += 1
            records['img_id'] += 1
        records['global_instance_id'] += 1
        records['vid_id'] += 1
    if not osp.isdir(save_dir):
        os.makedirs(save_dir)
    mmcv.dump(uav123, osp.join(save_dir, 'uav123.json'))
    print('-----UAV123 Dataset------')
    print(f'{records["vid_id"]- 1} videos')
    print(f'{records["global_instance_id"]- 1} instances')
    print(f'{records["img_id"]- 1} images')
    print(f'{records["ann_id"] - 1} objects')
    print('-----------------------------')
19,985 | import argparse
import glob
import os
import os.path as osp
from collections import defaultdict
import mmcv
from tqdm import tqdm
def parse_args():
    """Build and run the CLI parser for GOT10k -> COCO-Video conversion."""
    cli = argparse.ArgumentParser(
        description='GOT10k dataset to COCO Video format')
    cli.add_argument('-i', '--input', help='root directory of GOT10k dataset')
    cli.add_argument(
        '-o', '--output', help='directory to save coco formatted label file')
    cli.add_argument(
        '--split',
        help="the split set of GOT10k, 'all' denotes the whole dataset",
        choices=['train', 'test', 'val', 'all'],
        default='all')
    return cli.parse_args()
19,986 | import argparse
import glob
import os
import os.path as osp
from collections import defaultdict
import mmcv
from tqdm import tqdm
The provided code snippet includes necessary dependencies for implementing the `convert_got10k` function. Write a Python function `def convert_got10k(ann_dir, save_dir, split='test')` to solve the following problem:
Convert got10k dataset to COCO style. Args: ann_dir (str): The path of got10k dataset save_dir (str): The path to save `got10k`. split (str): the split ('train', 'val' or 'test') of dataset.
Here is the function:
def convert_got10k(ann_dir, save_dir, split='test'):
    """Convert got10k dataset to COCO style.

    Args:
        ann_dir (str): The path of got10k dataset
        save_dir (str): The path to save `got10k`.
        split (str): the split ('train', 'val' or 'test') of dataset.
    """
    assert split in ['train', 'test', 'val'], f'split [{split}] does not exist'
    got10k = defaultdict(list)
    # Running 1-based counters used for the COCO-style ids.
    records = dict(vid_id=1, img_id=1, ann_id=1, global_instance_id=1)
    # Single-object tracking: one dummy category.
    got10k['categories'] = [dict(id=0, name=0)]
    videos_list = mmcv.list_from_file(osp.join(ann_dir, split, 'list.txt'))
    for video_name in tqdm(videos_list, desc=split):
        video = dict(id=records['vid_id'], name=video_name)
        got10k['videos'].append(video)
        video_path = osp.join(ann_dir, split, video_name)
        ann_file = osp.join(video_path, 'groundtruth.txt')
        gt_bboxes = mmcv.list_from_file(ann_file)
        img_files = glob.glob(osp.join(video_path, '*.jpg'))
        # Sort frames numerically by the integer file-name stem.
        img_files = sorted(
            img_files, key=lambda x: int(x.split(os.sep)[-1][:-4]))
        # Read the first frame to obtain the (constant) image resolution.
        img = mmcv.imread(osp.join(video_path, '00000001.jpg'))
        height, width, _ = img.shape
        if split in ['train', 'val']:
            absence_label = mmcv.list_from_file(
                osp.join(video_path, 'absence.label'))
            # cover_label denotes the ranges of object visible ratios, and
            # it's in range [0,8] which correspond to ranges of object
            # visible ratios: 0%, (0%, 15%], (15%~30%], (30%, 45%],
            # (45%, 60%], (60%, 75%], (75%, 90%], (90%, 100%) and 100%
            # respectively
            cover_label = mmcv.list_from_file(
                osp.join(video_path, 'cover.label'))
            # cut_by_image_label denotes whether the object is cut by the
            # image boundary.
            cut_by_image_label = mmcv.list_from_file(
                osp.join(video_path, 'cut_by_image.label'))
        for frame_id, img_file in enumerate(img_files):
            img_name = img_file.split(os.sep)[-1]
            # the images' root is not included in file_name
            file_name = osp.join(video_name, img_name)
            image = dict(
                file_name=file_name,
                height=height,
                width=width,
                id=records['img_id'],
                frame_id=frame_id,
                video_id=records['vid_id'])
            got10k['images'].append(image)
            ann = dict(
                id=records['ann_id'],
                video_id=records['vid_id'],
                image_id=records['img_id'],
                instance_id=records['global_instance_id'],
                category_id=0)
            if split == 'test':
                # Only the first frame of a test video has ground truth;
                # later frames get a zero placeholder box.
                if frame_id == 0:
                    bbox = list(map(float, gt_bboxes[0].split(',')))
                else:
                    bbox = [0., 0., 0., 0.]
                ann.update(dict(bbox=bbox, area=bbox[2] * bbox[3]))
            else:
                bbox = list(map(float, gt_bboxes[frame_id].split(',')))
                ann.update(
                    dict(
                        bbox=bbox,
                        area=bbox[2] * bbox[3],
                        absence=absence_label[frame_id] == '1',
                        cover=int(cover_label[frame_id]),
                        cut_by_image=cut_by_image_label[frame_id] == '1'))
            got10k['annotations'].append(ann)
            records['ann_id'] += 1
            records['img_id'] += 1
        records['global_instance_id'] += 1
        records['vid_id'] += 1
    if not osp.isdir(save_dir):
        os.makedirs(save_dir)
    mmcv.dump(got10k, osp.join(save_dir, f'got10k_{split}.json'))
    print(f'-----GOT10k {split} Dataset------')
    print(f'{records["vid_id"]- 1} videos')
    print(f'{records["global_instance_id"]- 1} instances')
    print(f'{records["img_id"]- 1} images')
    print(f'{records["ann_id"] - 1} objects')
    print('-----------------------------')
19,987 | import argparse
import glob
import os
import os.path as osp
import time
import numpy as np
def parse_args():
    """Build and run the CLI parser for generating GOT10k info files."""
    cli = argparse.ArgumentParser(
        description='Generate the information of GOT10k dataset')
    cli.add_argument('-i', '--input', help='root directory of GOT10k dataset')
    cli.add_argument('-o', '--output', help='directory to save text file')
    cli.add_argument(
        '--split',
        help="the split set of GOT10k, 'all' denotes the whole dataset",
        choices=['train', 'test', 'val', 'train_vot', 'val_vot', 'all'],
        default='all')
    return cli.parse_args()
19,988 | import argparse
import glob
import os
import os.path as osp
import time
import numpy as np
The provided code snippet includes necessary dependencies for implementing the `gen_data_infos` function. Write a Python function `def gen_data_infos(data_root, save_dir, split='train')` to solve the following problem:
Generate dataset information. Args: data_root (str): The path of dataset. save_dir (str): The path to save the information of dataset. split (str): the split ('train' or 'test') of dataset.
Here is the function:
def gen_data_infos(data_root, save_dir, split='train'):
    """Generate dataset information for GOT10k.

    Writes `got10k_{split}_infos.txt` under `save_dir`; after a header
    line, each line is `(video_path,annotation_path,start_frame_id,
    end_frame_id)`.

    Args:
        data_root (str): The path of dataset.
        save_dir (str): The path to save the information of dataset.
        split (str): The split of the dataset, one of 'train', 'val',
            'test', 'val_vot' or 'train_vot'.
    """
    # Fix: this message previously said 'LaSOT'; this script handles GOT10k.
    print(f'Generate the information of {split} set of GOT10k dataset...')
    start_time = time.time()
    assert split in ['train', 'val', 'test', 'val_vot', 'train_vot']
    if split in ['train', 'val', 'test']:
        videos_list = np.loadtxt(
            osp.join(data_root, split, 'list.txt'), dtype=np.str_)
    else:
        # 'train_vot'/'val_vot' splits are defined by id lists shipped as
        # e.g. 'got10k_vot_train_split.txt' (note the reversed word order).
        split_reverse = '_'.join(split.split('_')[::-1])
        vids_id_list = np.loadtxt(
            osp.join(data_root, 'train', f'got10k_{split_reverse}_split.txt'),
            dtype=float)
        # Ids in the split file are 0-based; directory names are 1-based.
        videos_list = [
            'GOT-10k_Train_%06d' % (int(video_id) + 1)
            for video_id in vids_id_list
        ]
    if not osp.isdir(save_dir):
        os.makedirs(save_dir, exist_ok=True)
    with open(osp.join(save_dir, f'got10k_{split}_infos.txt'), 'w') as f:
        f.write('The format of each line in this txt is '
                '(video_path,annotation_path,start_frame_id,end_frame_id)')
        videos_list = sorted(videos_list)
        for video_name in videos_list:
            if split in ['val', 'test']:
                video_path = osp.join(split, video_name)
            else:
                # Both vot sub-splits live under the 'train' directory.
                video_path = osp.join('train', video_name)
            ann_path = osp.join(video_path, 'groundtruth.txt')
            img_names = glob.glob(osp.join(data_root, video_path, '*.jpg'))
            # Frames are named by 1-based integer index; the largest stem
            # is the last frame id.
            end_frame_name = max(
                img_names, key=lambda x: int(osp.basename(x).split('.')[0]))
            end_frame_id = int(osp.basename(end_frame_name).split('.')[0])
            f.write(f'\n{video_path},{ann_path},1,{end_frame_id}')
    print(f'Done! ({time.time()-start_time:.2f} s)')
    print(f'The results are saved in {save_dir}')
19,989 | import argparse
import glob
import os
import os.path as osp
import time
def parse_args():
    """Build and run the CLI parser for generating TrackingNet info files."""
    cli = argparse.ArgumentParser(
        description='Generate the information of TrackingNet dataset')
    cli.add_argument(
        '-i', '--input', help='root directory of TrackingNet dataset')
    cli.add_argument('-o', '--output', help='directory to save text file')
    cli.add_argument(
        '--split',
        help="the split set of TrackingNet,'all' denotes the whole dataset",
        choices=['train', 'test', 'all'],
        default='all')
    # Default is the sentinel string 'all'; explicit values are chunk ints.
    cli.add_argument(
        '--chunks',
        help='the chunks of train set of TrackingNet',
        nargs='+',
        type=int,
        default=['all'])
    return cli.parse_args()
19,990 | import argparse
import glob
import os
import os.path as osp
import time
The provided code snippet includes necessary dependencies for implementing the `gen_data_infos` function. Write a Python function `def gen_data_infos(data_root, save_dir, split='train', chunks=['all'])` to solve the following problem:
Generate dataset information. args: data_root (str): The path of dataset. save_dir (str): The path to save the information of dataset. split (str): the split ('train' or 'test') of dataset. chunks (list): the chunks of train set of TrackingNet.
Here is the function:
def gen_data_infos(data_root, save_dir, split='train', chunks=['all']):
    """Generate dataset information.

    Args:
        data_root (str): The path of dataset.
        save_dir (str): The path to save the information of dataset.
        split (str): the split ('train' or 'test') of dataset.
        chunks (list): the chunks of train set of TrackingNet. The
            sentinel ['all'] selects every train chunk.
    """
    # NOTE(review): the mutable default ['all'] is never mutated here, so
    # it is harmless, but a tuple would be more defensive.
    print(f'Generate the information of {split} set of TrackingNet dataset...')
    start_time = time.time()
    if split == 'test':
        chunks_list = ['TEST']
    elif split == 'train':
        if 'all' in chunks:
            # The TrackingNet train set ships as 12 chunks TRAIN_0..TRAIN_11.
            chunks_list = [f'TRAIN_{i}' for i in range(12)]
        else:
            chunks_list = [f'TRAIN_{chunk}' for chunk in chunks]
    else:
        raise NotImplementedError
    if not osp.isdir(save_dir):
        os.makedirs(save_dir, exist_ok=True)
    assert len(chunks_list) > 0
    with open(osp.join(save_dir, f'trackingnet_{split}_infos.txt'), 'w') as f:
        f.write(
            'The format of each line in this txt is '
            '(chunk,video_path,annotation_path,start_frame_id,end_frame_id)')
        for chunk in chunks_list:
            chunk_ann_dir = osp.join(data_root, chunk)
            assert osp.isdir(
                chunk_ann_dir
            ), f'annotation directory {chunk_ann_dir} does not exist'
            videos_list = sorted(os.listdir(osp.join(chunk_ann_dir, 'frames')))
            for video_name in videos_list:
                video_path = osp.join(chunk, 'frames', video_name)
                # avoid creating empty file folds by mistakes
                if not os.listdir(osp.join(data_root, video_path)):
                    continue
                ann_path = osp.join(chunk, 'anno', video_name + '.txt')
                img_names = glob.glob(osp.join(data_root, video_path, '*.jpg'))
                # Frames are named by 0-based integer index; the largest
                # stem is the last frame id.
                end_frame_name = max(
                    img_names,
                    key=lambda x: int(osp.basename(x).split('.')[0]))
                end_frame_id = int(osp.basename(end_frame_name).split('.')[0])
                f.write(f'\n{video_path},{ann_path},0,{end_frame_id}')
    print(f'Done! ({time.time()-start_time:.2f} s)')
    print(f'The results are saved in {save_dir}')
19,991 | import argparse
import os
import os.path as osp
from collections import defaultdict
import mmcv
from tqdm import tqdm
def parse_args():
    """Build and run the CLI parser for TrackingNet conversion."""
    cli = argparse.ArgumentParser(
        description='TrackingNet test dataset to COCO Video format')
    cli.add_argument(
        '-i', '--input', help='root directory of TrackingNet test dataset')
    cli.add_argument(
        '-o', '--output', help='directory to save coco formatted label file')
    cli.add_argument(
        '--split',
        help="the split set of trackingnet,'all' denotes the whole dataset",
        choices=['train', 'test', 'all'],
        default='all')
    return cli.parse_args()
19,992 | import argparse
import os
import os.path as osp
from collections import defaultdict
import mmcv
from tqdm import tqdm
The provided code snippet includes necessary dependencies for implementing the `convert_trackingnet` function. Write a Python function `def convert_trackingnet(ann_dir, save_dir, split='test')` to solve the following problem:
Convert trackingnet dataset to COCO style. Args: ann_dir (str): The path of trackingnet test dataset save_dir (str): The path to save `trackingnet`. split (str): the split ('train' or 'test') of dataset.
Here is the function:
def convert_trackingnet(ann_dir, save_dir, split='test'):
    """Convert trackingnet dataset to COCO style.

    Args:
        ann_dir (str): The path of trackingnet test dataset
        save_dir (str): The path to save `trackingnet`.
        split (str): the split ('train' or 'test') of dataset.

    Raises:
        NotImplementedError: If `split` is neither 'train' nor 'test'.
    """
    if split == 'test':
        chunks = ['TEST']
    elif split == 'train':
        # The TrackingNet train set ships as 12 chunks TRAIN_0..TRAIN_11.
        chunks = [f'TRAIN_{i}' for i in range(12)]
    else:
        # Fix: the exception was previously evaluated as a bare expression
        # and never raised, so an unknown split fell through to a NameError
        # on `chunks`.
        raise NotImplementedError(f'split [{split}] is not supported')
    trackingnet = defaultdict(list)
    # Running 1-based counters used for the COCO-style ids.
    records = dict(vid_id=1, img_id=1, ann_id=1, global_instance_id=1)
    # Single-object tracking: one dummy category.
    trackingnet['categories'] = [dict(id=0, name=0)]
    for chunk in chunks:
        chunk_ann_dir = osp.join(ann_dir, chunk)
        assert osp.isdir(
            chunk_ann_dir
        ), f'annotation directory {chunk_ann_dir} does not exist'
        videos_list = os.listdir(osp.join(chunk_ann_dir, 'frames'))
        for video_name in tqdm(videos_list, desc=f'[{chunk}]'):
            video = dict(id=records['vid_id'], name=video_name)
            trackingnet['videos'].append(video)
            ann_file = osp.join(chunk_ann_dir, 'anno', video_name + '.txt')
            gt_bboxes = mmcv.list_from_file(ann_file)
            video_path = osp.join(chunk_ann_dir, 'frames', video_name)
            img_names = os.listdir(video_path)
            # Frames are named '<index>.jpg' with a 0-based index.
            img_names = sorted(img_names, key=lambda x: int(x[:-4]))
            # Read the first frame to obtain the (constant) resolution.
            img = mmcv.imread(osp.join(video_path, '0.jpg'))
            height, width, _ = img.shape
            for frame_id, img_name in enumerate(img_names):
                file_name = '%d' % (frame_id) + '.jpg'
                # Sanity-check that the frame sequence has no gaps.
                assert img_name == file_name
                # the images' root is not included in file_name
                file_name = osp.join(chunk, 'frames', video_name, img_name)
                image = dict(
                    file_name=file_name,
                    height=height,
                    width=width,
                    id=records['img_id'],
                    frame_id=frame_id,
                    video_id=records['vid_id'])
                trackingnet['images'].append(image)
                if split == 'test':
                    # Only the first test frame has ground truth; later
                    # frames get a zero placeholder box.
                    if frame_id == 0:
                        bbox = list(map(float, gt_bboxes[0].split(',')))
                    else:
                        bbox = [0., 0., 0., 0.]
                else:
                    bbox = list(map(float, gt_bboxes[frame_id].split(',')))
                ann = dict(
                    id=records['ann_id'],
                    video_id=records['vid_id'],
                    image_id=records['img_id'],
                    instance_id=records['global_instance_id'],
                    category_id=0,
                    bbox=bbox,
                    area=bbox[2] * bbox[3])
                trackingnet['annotations'].append(ann)
                records['ann_id'] += 1
                records['img_id'] += 1
            records['global_instance_id'] += 1
            records['vid_id'] += 1
    if not osp.isdir(save_dir):
        os.makedirs(save_dir)
    mmcv.dump(trackingnet, osp.join(save_dir, f'trackingnet_{split}.json'))
    print(f'-----TrackingNet {split} Dataset------')
    print(f'{records["vid_id"]- 1} videos')
    print(f'{records["global_instance_id"]- 1} instances')
    print(f'{records["img_id"]- 1} images')
    print(f'{records["ann_id"] - 1} objects')
    print('-----------------------------')
19,993 | import argparse
import glob
import os
import os.path as osp
import re
from collections import defaultdict
import mmcv
from tqdm import tqdm
def parse_args():
    """Build and run the CLI parser for OTB100 -> COCO-Video conversion."""
    cli = argparse.ArgumentParser(
        description='OTB100 dataset to COCO Video format')
    cli.add_argument('-i', '--input', help='root directory of OTB100 dataset')
    cli.add_argument(
        '-o', '--output', help='directory to save coco formatted label file')
    return cli.parse_args()
19,994 | import argparse
import glob
import os
import os.path as osp
import re
from collections import defaultdict
import mmcv
from tqdm import tqdm
The provided code snippet includes necessary dependencies for implementing the `convert_otb100` function. Write a Python function `def convert_otb100(otb, ann_dir, save_dir)` to solve the following problem:
Convert OTB100 dataset to COCO style. Args: otb (dict): The converted COCO style annotations. ann_dir (str): The path of OTB100 dataset save_dir (str): The path to save `OTB100`.
Here is the function:
def convert_otb100(otb, ann_dir, save_dir):
    """Convert OTB100 dataset to COCO style.

    Args:
        otb (dict): The converted COCO style annotations.
        ann_dir (str): The path of OTB100 dataset
        save_dir (str): The path to save `OTB100`.
    """
    # Running 1-based counters used for the COCO-style ids.
    records = dict(vid_id=1, img_id=1, ann_id=1, global_instance_id=1)
    ann_dir = osp.join(ann_dir, 'data')
    videos_list = os.listdir(ann_dir)
    # Single-object tracking: one dummy category.
    otb['categories'] = [dict(id=0, name=0)]
    for video_name in tqdm(videos_list):
        video_path = osp.join(ann_dir, video_name)
        # 'David' annotations start at frame 300 (standard OTB protocol).
        if video_name == 'David':
            start_frame_id = 300
        # The first five frames in Tiger1 can not be used
        # as the initial frames.
        # Details can be seen in tracker_benchmark_v1.0/initOmit/tiger1.txt.
        # The start_frame_id is 1-based.
        elif video_name == 'Tiger1':
            start_frame_id = 6
        else:
            start_frame_id = 1
        img_list = os.listdir(osp.join(video_path, 'img'))
        img_list = sorted(img_list)
        # Read the first frame to obtain the (constant) image resolution.
        img = mmcv.imread(osp.join(video_path, 'img', img_list[0]))
        height, width, _ = img.shape
        # One video may have several tracking instances with their
        # respective annotations.
        gt_list = glob.glob(
            osp.join(ann_dir, video_name, 'groundtruth_rect*.txt'))
        for gt_file in gt_list:
            # exclude empty files
            if osp.getsize(gt_file) == 0:
                continue
            video = dict(id=records['vid_id'], name=video_name)
            otb['videos'].append(video)
            gt_bboxes = mmcv.list_from_file(gt_file)
            # Tiger1's annotation file still contains the skipped frames;
            # drop them so gt and images stay aligned.
            if video_name == 'Tiger1':
                gt_bboxes = gt_bboxes[start_frame_id - 1:]
            for frame_id, gt_bbox in enumerate(gt_bboxes):
                # Map the 0-based output frame_id back to the source image
                # index (0-based position in img_list).
                src_frame_id = frame_id + start_frame_id - 1
                file_name = osp.join(video_name, 'img', img_list[src_frame_id])
                image = dict(
                    file_name=file_name,
                    height=height,
                    width=width,
                    id=records['img_id'],
                    frame_id=frame_id,
                    video_id=records['vid_id'])
                otb['images'].append(image)
                # Annotation lines use mixed separators; extract the four
                # (possibly negative) integers with a regex.
                bbox = list(map(int, re.findall(r'-?\d+', gt_bbox)))
                assert len(bbox) == 4
                anno_dict = dict(
                    id=records['ann_id'],
                    video_id=records['vid_id'],
                    image_id=records['img_id'],
                    instance_id=records['global_instance_id'],
                    category_id=0,
                    bbox=bbox,
                    area=bbox[2] * bbox[3],
                )
                otb['annotations'].append(anno_dict)
                records['ann_id'] += 1
                records['img_id'] += 1
            records['global_instance_id'] += 1
            records['vid_id'] += 1
    if not osp.isdir(save_dir):
        os.makedirs(save_dir)
    mmcv.dump(otb, osp.join(save_dir, 'otb100.json'))
    print('-----OTB100 Dataset------')
    print(f'{records["vid_id"]- 1} videos')
    print(f'{records["global_instance_id"]- 1} instances')
    print(f'{records["img_id"]- 1} images')
    print(f'{records["ann_id"] - 1} objects')
    print('-----------------------------')
19,995 | import argparse
import multiprocessing
import os
import os.path as osp
import re
import socket
from urllib import error, request
from tqdm import tqdm
def download_url(url_savedir_tuple):
    """Download one archive given a ``(url, save_dir)`` pair.

    HTTP errors are reported and swallowed; timeouts are retried up to
    five times before giving up. Best-effort: never raises.
    """
    url, saved_dir = url_savedir_tuple
    video_zip = osp.basename(url)
    os.makedirs(saved_dir, exist_ok=True)
    saved_file = osp.join(saved_dir, video_zip)
    try:
        request.urlretrieve(url, saved_file)
    except error.HTTPError as e:
        print(e)
        print('\r\n' + url + ' download failed!' + '\r\n')
    except socket.timeout:
        # Retry up to five more times on timeout.
        for attempt in range(1, 6):
            try:
                request.urlretrieve(url, saved_file)
                break
            except socket.timeout:
                err_info = 'Reloading %s for %d time' % (video_zip, attempt)
                print(err_info)
        else:
            print('downloading %s failed!' % video_zip)
19,996 | import argparse
import multiprocessing
import os
import os.path as osp
import re
import socket
from urllib import error, request
from tqdm import tqdm
def parse_url(homepage, href=None):
    """Yield the href targets of ``<a>`` tags on the datasets page.

    Args:
        homepage (str): Base URL; 'datasets.html' is appended to it.
        href: Optional filter forwarded to BeautifulSoup's ``find_all``.

    Yields:
        str: The stripped href of each matching anchor tag.
    """
    html = request.urlopen(homepage + 'datasets.html').read().decode('utf-8')
    # NOTE(review): BeautifulSoup is presumably bound by a guarded
    # try/except import elsewhere in this module (set to None when the
    # package is missing) — confirm; it is not imported in this chunk.
    if BeautifulSoup is not None:
        soup = BeautifulSoup(html, features='html.parser')
    else:
        raise ImportError(
            "Please install beautifulsoup4 by 'pip install beautifulsoup4'")
    tags = soup.find_all('a', href=href)
    for tag in tags:
        yield str(tag.get('href')).strip()
19,997 | import argparse
import os.path as osp
from collections import defaultdict
import mmcv
from tao.toolkit.tao import Tao
from tqdm import tqdm
def parse_args():
    """Build and run the CLI parser for making TAO annotation files."""
    cli = argparse.ArgumentParser(description='Make annotation files for TAO')
    cli.add_argument('-i', '--input', help='path of TAO json file')
    cli.add_argument(
        '--filter-classes',
        action='store_true',
        help='whether filter 1230 classes to 482.')
    return cli.parse_args()
19,998 | import argparse
import os.path as osp
from collections import defaultdict
import mmcv
from tao.toolkit.tao import Tao
from tqdm import tqdm
def get_classes(tao_path, filter_classes=True):
    """Collect the category set actually used across the TAO splits.

    Reads the train/validation/test annotation files, unions the category
    ids that appear in each, and writes the resulting class names to
    ``<tao_path>/tao_classes.txt`` as a side effect.

    Args:
        tao_path (str): Directory containing ``train.json``,
            ``validation.json`` and ``test_categories.json``.
        filter_classes (bool): If True, return only categories that appear
            in at least one split; otherwise return every category declared
            in the train file. Defaults to True.

    Returns:
        list[dict]: COCO-style category dicts.
    """
    train = mmcv.load(osp.join(tao_path, 'train.json'))
    train_classes = list(set([_['category_id'] for _ in train['annotations']]))
    print(f'TAO train set contains {len(train_classes)} categories.')
    val = mmcv.load(osp.join(tao_path, 'validation.json'))
    val_classes = list(set([_['category_id'] for _ in val['annotations']]))
    print(f'TAO val set contains {len(val_classes)} categories.')
    # The test split ships category definitions rather than annotations.
    test = mmcv.load(osp.join(tao_path, 'test_categories.json'))
    test_classes = list(set([_['id'] for _ in test['categories']]))
    print(f'TAO test set contains {len(test_classes)} categories.')
    tao_classes = set(train_classes + val_classes + test_classes)
    print(f'TAO totally contains {len(tao_classes)} categories.')
    # Keep the full category dicts (from the train file) for the used ids.
    tao_classes = [_ for _ in train['categories'] if _['id'] in tao_classes]
    with open(osp.join(tao_path, 'tao_classes.txt'), 'wt') as f:
        for c in tao_classes:
            name = c['name']
            f.writelines(f'{name}\n')
    if filter_classes:
        return tao_classes
    else:
        return train['categories']
19,999 | import argparse
import os.path as osp
from collections import defaultdict
import mmcv
from tao.toolkit.tao import Tao
from tqdm import tqdm
def convert_tao(file, classes):
    """Re-package official TAO annotations into COCO-VID style.

    Args:
        file (str): Path of a TAO annotation json file.
        classes (list[dict]): Category dicts to embed into the output.

    Returns:
        defaultdict: COCO-VID style annotations with ``videos``, ``images``,
        ``annotations``, ``tracks``, ``info``, ``licenses`` and
        ``categories`` keys.
    """
    tao = Tao(file)
    raw = mmcv.load(file)
    out = defaultdict(list)
    out['tracks'] = raw['tracks'].copy()
    out['info'] = raw['info'].copy()
    out['licenses'] = raw['licenses'].copy()
    out['categories'] = classes
    for video in tqdm(raw['videos']):
        img_infos = tao.vid_img_map[video['id']]
        img_infos = sorted(img_infos, key=lambda x: x['frame_index'])
        # Gap between the first two annotated frames. Presumably frames are
        # annotated at a constant interval and every video has at least two
        # images -- TODO confirm against the dataset.
        frame_range = img_infos[1]['frame_index'] - img_infos[0]['frame_index']
        video['frame_range'] = frame_range
        out['videos'].append(video)
        for i, img_info in enumerate(img_infos):
            # Re-index annotated frames consecutively from 0 per video.
            img_info['frame_id'] = i
            img_info['neg_category_ids'] = video['neg_category_ids']
            img_info['not_exhaustive_category_ids'] = video[
                'not_exhaustive_category_ids']
            out['images'].append(img_info)
            ann_infos = tao.img_ann_map[img_info['id']]
            for ann_info in ann_infos:
                # COCO-VID uses 'instance_id'; TAO calls it 'track_id'.
                ann_info['instance_id'] = ann_info['track_id']
                out['annotations'].append(ann_info)
    # Sanity check: nothing was dropped or duplicated.
    assert len(out['videos']) == len(raw['videos'])
    assert len(out['images']) == len(raw['images'])
    assert len(out['annotations']) == len(raw['annotations'])
    return out
20,000 | import argparse
import os
import os.path as osp
from collections import defaultdict
import mmcv
import numpy as np
from tqdm import tqdm
def parse_args():
    """Parse command-line options for the MOT -> COCO-VID converter.

    Returns:
        argparse.Namespace: Options with ``input``, ``output``,
        ``convert_det`` and ``split_train`` attributes.
    """
    ap = argparse.ArgumentParser(
        description='Convert MOT label and detections to COCO-VID format.')
    ap.add_argument('-i', '--input', help='path of MOT data')
    ap.add_argument(
        '-o', '--output', help='path to save coco formatted label file')
    ap.add_argument(
        '--convert-det',
        action='store_true',
        help='convert official detection results.')
    ap.add_argument(
        '--split-train',
        action='store_true',
        help='split the train set into half-train and half-validate.')
    return ap.parse_args()
20,001 | import argparse
import os
import os.path as osp
from collections import defaultdict
import mmcv
import numpy as np
from tqdm import tqdm
# MOT class ids that are neither kept nor marked as ignore regions.
USELESS = [3, 4, 5, 6, 9, 10, 11]
# MOT class ids treated as ignore regions.
IGNORES = [2, 7, 8, 12, 13]


def parse_gts(gts, is_mot15):
    """Group MOT ground-truth lines by frame id.

    Args:
        gts (list[str]): Raw comma-separated lines of a ``gt.txt`` file.
        is_mot15 (bool): MOT15 lines carry no confidence / class /
            visibility fields, so defaults are substituted for them.

    Returns:
        defaultdict[int, list[dict]]: COCO-style annotation dicts keyed by
        1-based frame id.
    """
    per_frame = defaultdict(list)
    for line in gts:
        fields = line.strip().split(',')
        frame_id, ins_id = int(fields[0]), int(fields[1])
        bbox = [float(v) for v in fields[2:6]]
        if is_mot15:
            conf, class_id, visibility = 1., 1, 1.
        else:
            conf = float(fields[6])
            class_id = int(fields[7])
            visibility = float(fields[8])
        # Skip both useless and ignored categories entirely.
        if class_id in USELESS or class_id in IGNORES:
            continue
        per_frame[frame_id].append(
            dict(
                category_id=1,
                bbox=bbox,
                area=bbox[2] * bbox[3],
                iscrowd=False,
                visibility=visibility,
                mot_instance_id=ins_id,
                mot_conf=conf,
                mot_class_id=class_id))
    return per_frame
20,002 | import argparse
import os
import os.path as osp
from collections import defaultdict
import mmcv
import numpy as np
from tqdm import tqdm
def parse_dets(dets):
    """Group official MOT detection lines by frame id.

    Args:
        dets (list[str]): Raw comma-separated lines of a ``det.txt`` file.

    Returns:
        defaultdict[int, list[list[float]]]: Per-frame detections as
        ``[x1, y1, x2, y2, conf]``.
    """
    per_frame = defaultdict(list)
    for line in dets:
        fields = line.strip().split(',')
        frame_id, ins_id = int(fields[0]), int(fields[1])
        # Official detections never carry an identity.
        assert ins_id == -1
        x1, y1, w, h, conf = [float(v) for v in fields[2:7]]
        # [x1, y1, x2, y2] to be consistent with mmdet
        per_frame[frame_id].append([x1, y1, x1 + w, y1 + h, conf])
    return per_frame
20,003 | import argparse
import json
import os
import os.path as osp
from collections import defaultdict
import mmcv
from PIL import Image
from tqdm import tqdm
def parse_args():
    """Parse command-line options for the CrowdHuman converter.

    Returns:
        argparse.Namespace: Options with ``input`` and ``output``
        attributes.
    """
    ap = argparse.ArgumentParser(
        description='CrowdHuman to COCO Video format')
    ap.add_argument(
        '-i',
        '--input',
        help='root directory of CrowdHuman annotations',
    )
    ap.add_argument(
        '-o',
        '--output',
        help='directory to save coco formatted label file',
    )
    return ap.parse_args()
20,004 | import argparse
import json
import os
import os.path as osp
from collections import defaultdict
import mmcv
from PIL import Image
from tqdm import tqdm
def load_odgt(filename):
    """Load a CrowdHuman ``.odgt`` annotation file.

    An ``.odgt`` file stores one standalone JSON object per line.

    Args:
        filename (str): Path of the ``.odgt`` file.

    Returns:
        list[dict]: One parsed dict per line.
    """
    with open(filename, 'r') as f:
        # Stream line by line instead of materializing the whole file with
        # readlines(); json.loads tolerates the trailing newline, so the
        # previous strip('\n') was redundant.
        return [json.loads(line) for line in f]
The provided code snippet includes necessary dependencies for implementing the `convert_crowdhuman` function. Write a Python function `def convert_crowdhuman(ann_dir, save_dir, mode='train')` to solve the following problem:
Convert CrowdHuman dataset in COCO style. Args: ann_dir (str): The path of CrowdHuman dataset. save_dir (str): The path to save annotation files. mode (str): Convert train dataset or validation dataset. Options are 'train', 'val'. Default: 'train'.
Here is the function:
def convert_crowdhuman(ann_dir, save_dir, mode='train'):
    """Convert CrowdHuman dataset in COCO style.

    Writes ``crowdhuman_<mode>.json`` into ``save_dir``.

    Args:
        ann_dir (str): The path of CrowdHuman dataset.
        save_dir (str): The path to save annotation files.
        mode (str): Convert train dataset or validation dataset. Options are
            'train', 'val'. Default: 'train'.
    """
    assert mode in ['train', 'val']
    # Running counters for unique image / annotation ids (1-based).
    records = dict(img_id=1, ann_id=1)
    outputs = defaultdict(list)
    # CrowdHuman has a single category.
    outputs['categories'] = [dict(id=1, name='pedestrian')]
    data_infos = load_odgt(osp.join(ann_dir, f'annotation_{mode}.odgt'))
    for data_info in tqdm(data_infos):
        img_name = osp.join('Images', f"{data_info['ID']}.jpg")
        # Open the image only to read its dimensions.
        img = Image.open(osp.join(ann_dir, mode, img_name))
        width, height = img.size[:2]
        image = dict(
            file_name=img_name,
            height=height,
            width=width,
            id=records['img_id'])
        outputs['images'].append(image)
        # NOTE(review): mode is asserted to be 'train'/'val' above, so this
        # guard is always true; it mirrors sibling converters that also
        # handle a 'test' split.
        if mode != 'test':
            for ann_info in data_info['gtboxes']:
                # 'fbox' is the full-body box; 'vbox' the visible part.
                bbox = ann_info['fbox']
                if 'extra' in ann_info and 'ignore' in ann_info[
                        'extra'] and ann_info['extra']['ignore'] == 1:
                    iscrowd = True
                else:
                    iscrowd = False
                ann = dict(
                    id=records['ann_id'],
                    image_id=records['img_id'],
                    category_id=outputs['categories'][0]['id'],
                    vis_bbox=ann_info['vbox'],
                    bbox=bbox,
                    area=bbox[2] * bbox[3],
                    iscrowd=iscrowd)
                outputs['annotations'].append(ann)
                records['ann_id'] += 1
        records['img_id'] += 1
    if not osp.isdir(save_dir):
        os.makedirs(save_dir)
    mmcv.dump(outputs, osp.join(save_dir, f'crowdhuman_{mode}.json'))
    print(f'-----CrowdHuman {mode} set------')
    print(f'total {records["img_id"] - 1} images')
    if mode != 'test':
        print(f'{records["ann_id"] - 1} pedestrians are annotated.')
    print('-----------------------')
20,005 | import argparse
import os
import os.path as osp
import random
import mmcv
import numpy as np
from tqdm import tqdm
def parse_args():
    """Parse command-line options for the MOT -> ReID converter.

    Returns:
        argparse.Namespace: Options with ``input``, ``output``,
        ``val_split``, ``vis_threshold``, ``min_per_person`` and
        ``max_per_person`` attributes.
    """
    parser = argparse.ArgumentParser(
        description='Convert MOT dataset into ReID dataset.')
    parser.add_argument('-i', '--input', help='path of MOT data')
    parser.add_argument('-o', '--output', help='path to save ReID dataset')
    parser.add_argument(
        '--val-split',
        type=float,
        default=0.2,
        help='proportion of the validation dataset to the whole ReID dataset')
    parser.add_argument(
        '--vis-threshold',
        type=float,
        default=0.3,
        help='threshold of visibility for each person')
    parser.add_argument(
        '--min-per-person',
        type=int,
        default=8,
        help='minimum number of images for each person')
    parser.add_argument(
        '--max-per-person',
        type=int,
        default=1000,
        # Typo fix: help text previously read 'maxmum'.
        help='maximum number of images for each person')
    return parser.parse_args()
20,006 | import argparse
import os
import os.path as osp
from collections import defaultdict
import mmcv
from tqdm import tqdm
def parse_args():
    """Parse command-line options for the DanceTrack converter.

    Returns:
        argparse.Namespace: Options with ``input`` and ``output``
        attributes.
    """
    ap = argparse.ArgumentParser(
        description='Convert DanceTrack label and detections to \
        COCO-VID format.')
    ap.add_argument('-i', '--input', help='path of MOT data')
    ap.add_argument(
        '-o', '--output', help='path to save coco formatted label file')
    return ap.parse_args()
20,007 | import argparse
import os
import os.path as osp
from collections import defaultdict
import mmcv
from tqdm import tqdm
# Class ids that are neither kept nor marked as ignore regions.
USELESS = [3, 4, 5, 6, 9, 10, 11]
# Class ids treated as ignore regions.
IGNORES = [2, 7, 8, 12, 13]


def parse_gts(gts):
    """Group DanceTrack ground-truth lines by frame id.

    Args:
        gts (list[str]): Raw comma-separated lines of a ``gt.txt`` file.

    Returns:
        defaultdict[int, list[dict]]: COCO-style annotation dicts keyed by
        1-based frame id.
    """
    per_frame = defaultdict(list)
    for line in gts:
        fields = line.strip().split(',')
        frame_id, ins_id = int(fields[0]), int(fields[1])
        bbox = [float(v) for v in fields[2:6]]
        conf = float(fields[6])
        class_id = int(fields[7])
        visibility = float(fields[8])
        # Skip both useless and ignored categories entirely.
        if class_id in USELESS or class_id in IGNORES:
            continue
        per_frame[frame_id].append(
            dict(
                category_id=1,
                bbox=bbox,
                area=bbox[2] * bbox[3],
                iscrowd=False,
                visibility=visibility,
                mot_instance_id=ins_id,
                mot_conf=conf,
                mot_class_id=class_id))
    return per_frame
20,008 | import argparse
import glob
import os
import os.path as osp
import xml.etree.ElementTree as ET
from collections import defaultdict
import mmcv
from tqdm import tqdm
def parse_args():
    """Parse command-line options for the ImageNet DET converter.

    Returns:
        argparse.Namespace: Options with ``input`` and ``output``
        attributes.
    """
    ap = argparse.ArgumentParser(
        description='ImageNet DET to COCO Video format')
    ap.add_argument(
        '-i',
        '--input',
        help='root directory of ImageNet DET annotations',
    )
    ap.add_argument(
        '-o',
        '--output',
        help='directory to save coco formatted label file',
    )
    return ap.parse_args()
20,009 | import argparse
import glob
import os
import os.path as osp
import xml.etree.ElementTree as ET
from collections import defaultdict
import mmcv
from tqdm import tqdm
# The 30 ImageNet VID classes plus a final catch-all bucket for every other
# DET class. NOTE(review): 'other_categeries' is a pre-existing misspelling;
# it is a runtime value, so it is deliberately left unchanged here.
CLASSES = ('airplane', 'antelope', 'bear', 'bicycle', 'bird', 'bus', 'car',
           'cattle', 'dog', 'domestic_cat', 'elephant', 'fox', 'giant_panda',
           'hamster', 'horse', 'lion', 'lizard', 'monkey', 'motorcycle',
           'rabbit', 'red_panda', 'sheep', 'snake', 'squirrel', 'tiger',
           'train', 'turtle', 'watercraft', 'whale', 'zebra',
           'other_categeries')
def parse_xml(img_name, xml_path, is_vid_train_frame, records, DET,
              obj_num_classes):
    """Parse one PASCAL-style xml annotation file and record it in ``DET``.

    Args:
        img_name (str): image file path.
        xml_path (str): annotation file path.
        is_vid_train_frame (bool): If True, the image is used for the training
            of video object detection task, otherwise, not used.
        records (dict): The record information like image id, annotation id.
        DET (dict): The converted COCO style annotations (mutated in place).
        obj_num_classes (dict): The number of objects per class.

    Returns:
        tuple: (records, DET, obj_num_classes), records is the updated record
            information like image id, annotation id, DET is the updated
            COCO style annotations, obj_num_classes is the updated number of
            objects per class.
    """
    tree = ET.parse(xml_path)
    root = tree.getroot()
    size = root.find('size')
    width = int(size.find('width').text)
    height = int(size.find('height').text)
    image = dict(
        file_name=img_name,
        height=height,
        width=width,
        id=records['img_id'],
        is_vid_train_frame=is_vid_train_frame)
    DET['images'].append(image)
    if is_vid_train_frame:
        records['vid_train_frames'] += 1
    # Images without objects are still recorded (negatives), just counted.
    if root.findall('object') == []:
        print(f'{xml_path} has no objects.')
        records['num_no_objects'] += 1
        records['img_id'] += 1
        return records, DET, obj_num_classes
    for obj in root.findall('object'):
        name = obj.find('name').text
        # NOTE(review): ``cats_id_maps`` is a module-level mapping from the
        # xml class identifier to a category id; it is not defined in this
        # snippet and must be populated elsewhere -- confirm. Unknown names
        # fall into the extra bucket id (len(cats_id_maps) + 1).
        if name in cats_id_maps:
            category_id = cats_id_maps[name]
        else:
            category_id = len(cats_id_maps) + 1
        bnd_box = obj.find('bndbox')
        x1, y1, x2, y2 = [
            int(bnd_box.find('xmin').text),
            int(bnd_box.find('ymin').text),
            int(bnd_box.find('xmax').text),
            int(bnd_box.find('ymax').text)
        ]
        w = x2 - x1
        h = y2 - y1
        ann = dict(
            id=records['ann_id'],
            image_id=records['img_id'],
            category_id=category_id,
            bbox=[x1, y1, w, h],
            area=w * h,
            iscrowd=False)
        DET['annotations'].append(ann)
        if category_id not in obj_num_classes:
            obj_num_classes[category_id] = 1
        else:
            obj_num_classes[category_id] += 1
        records['ann_id'] += 1
    records['img_id'] += 1
    return records, DET, obj_num_classes
The provided code snippet includes necessary dependencies for implementing the `convert_det` function. Write a Python function `def convert_det(DET, ann_dir, save_dir)` to solve the following problem:
Convert ImageNet DET dataset in COCO style. Args: DET (dict): The converted COCO style annotations. ann_dir (str): The path of ImageNet DET dataset save_dir (str): The path to save `DET`.
Here is the function:
def convert_det(DET, ann_dir, save_dir):
    """Convert ImageNet DET dataset in COCO style.

    Writes ``imagenet_det_30plus1cls.json`` into ``save_dir``.

    Args:
        DET (dict): The converted COCO style annotations (mutated in place).
        ann_dir (str): The path of ImageNet DET dataset
        save_dir (str): The path to save `DET`.
    """
    # Sub-directories of Annotations/DET that make up the train split.
    dataset_sets = ('train/ILSVRC2013_train', 'train/ILSVRC2014_train_0000',
                    'train/ILSVRC2014_train_0001',
                    'train/ILSVRC2014_train_0002',
                    'train/ILSVRC2014_train_0003',
                    'train/ILSVRC2014_train_0004',
                    'train/ILSVRC2014_train_0005',
                    'train/ILSVRC2014_train_0006')
    records = dict(img_id=1, ann_id=1, num_no_objects=0, vid_train_frames=0)
    obj_num_classes = dict()
    # Images listed in DET_train_30classes.txt are also used when training
    # video detectors; convert them first with the flag set.
    vid_train_img_list = osp.join(ann_dir, 'Lists/DET_train_30classes.txt')
    vid_train_img_list = mmcv.list_from_file(vid_train_img_list)
    vid_train_img_names = []
    for vid_train_img_info in vid_train_img_list:
        vid_train_img_names.append(f"{vid_train_img_info.split(' ')[0]}.JPEG")
    for img_name in tqdm(vid_train_img_names):
        xml_path = osp.join(ann_dir, 'Annotations/DET',
                            img_name.replace('JPEG', 'xml'))
        records, DET, obj_num_classes = parse_xml(img_name, xml_path, True,
                                                  records, DET,
                                                  obj_num_classes)
    for sub_set in tqdm(dataset_sets):
        sub_set_base_path = osp.join(ann_dir, 'Annotations/DET', sub_set)
        # ILSVRC2013 nests xmls one directory deeper than the 2014 sets.
        if 'train/ILSVRC2013_train' == sub_set:
            xml_paths = sorted(
                glob.glob(osp.join(sub_set_base_path, '*', '*.xml')))
        else:
            xml_paths = sorted(glob.glob(osp.join(sub_set_base_path, '*.xml')))
        for xml_path in tqdm(xml_paths):
            img_name = xml_path.replace(sub_set_base_path, sub_set)
            img_name = img_name.replace('xml', 'JPEG')
            is_vid_train_frame = False
            # Skip images already converted above as VID-training frames.
            if img_name in vid_train_img_names:
                continue
            records, DET, obj_num_classes = parse_xml(img_name, xml_path,
                                                      is_vid_train_frame,
                                                      records, DET,
                                                      obj_num_classes)
    if not osp.isdir(save_dir):
        os.makedirs(save_dir)
    mmcv.dump(DET, osp.join(save_dir, 'imagenet_det_30plus1cls.json'))
    print('-----ImageNet DET------')
    print(f'total {records["img_id"] - 1} images')
    print(f'{records["num_no_objects"]} images have no objects')
    print(f'total {records["vid_train_frames"]} images '
          'for video detection training')
    print(f'{records["ann_id"] - 1} objects are annotated.')
    print('-----------------------')
    # NOTE(review): this raises KeyError if some class ended up with zero
    # converted objects -- confirm that every class always occurs.
    for i in range(1, len(CLASSES) + 1):
        print(f'Class {i} {CLASSES[i - 1]} has {obj_num_classes[i]} objects.')
20,010 | import argparse
import os
import os.path as osp
import xml.etree.ElementTree as ET
from collections import defaultdict
import mmcv
from tqdm import tqdm
def parse_args():
    """Parse command-line options for the ImageNet VID converter.

    Returns:
        argparse.Namespace: Options with ``input`` and ``output``
        attributes.
    """
    ap = argparse.ArgumentParser(
        description='ImageNet VID to COCO Video format')
    ap.add_argument(
        '-i',
        '--input',
        help='root directory of ImageNet VID annotations',
    )
    ap.add_argument(
        '-o',
        '--output',
        help='directory to save coco formatted label file',
    )
    return ap.parse_args()
20,011 | import argparse
import os
import os.path as osp
import xml.etree.ElementTree as ET
from collections import defaultdict
import mmcv
from tqdm import tqdm
# The 30 object classes of the ImageNet VID challenge.
CLASSES = ('airplane', 'antelope', 'bear', 'bicycle', 'bird', 'bus', 'car',
           'cattle', 'dog', 'domestic_cat', 'elephant', 'fox', 'giant_panda',
           'hamster', 'horse', 'lion', 'lizard', 'monkey', 'motorcycle',
           'rabbit', 'red_panda', 'sheep', 'snake', 'squirrel', 'tiger',
           'train', 'turtle', 'watercraft', 'whale', 'zebra')
# Maps the class identifier found in xml <name> tags to a contiguous
# category id. NOTE(review): left empty in this snippet -- it is presumably
# populated elsewhere before conversion; if it stays empty, every object
# would be skipped. Confirm against the full script.
cats_id_maps = {}
def parse_train_list(ann_dir):
    """Parse the txt file of ImageNet VID train dataset.

    Args:
        ann_dir (str): Root of the annotations; reads
            ``Lists/VID_train_15frames.txt`` under it.

    Returns:
        defaultdict: ``video name -> dict(vid_train_frames, num_frames)``,
        where ``vid_train_frames`` are the 0-based frame indices sampled
        for training.
    """
    img_list = osp.join(ann_dir, 'Lists/VID_train_15frames.txt')
    img_list = mmcv.list_from_file(img_list)
    train_infos = defaultdict(list)
    for info in img_list:
        info = info.split(' ')
        # The list file uses 1-based frame numbers; convert to 0-based.
        if info[0] not in train_infos:
            train_infos[info[0]] = dict(
                vid_train_frames=[int(info[2]) - 1], num_frames=int(info[-1]))
        else:
            train_infos[info[0]]['vid_train_frames'].append(int(info[2]) - 1)
    return train_infos
def parse_val_list(ann_dir):
    """Parse the txt file of ImageNet VID val dataset.

    Args:
        ann_dir (str): Root of the annotations; reads
            ``Lists/VID_val_videos.txt`` under it.

    Returns:
        defaultdict: ``video name -> dict(num_frames)``.
    """
    img_list = osp.join(ann_dir, 'Lists/VID_val_videos.txt')
    img_list = mmcv.list_from_file(img_list)
    val_infos = defaultdict(list)
    for info in img_list:
        info = info.split(' ')
        val_infos[info[0]] = dict(num_frames=int(info[-1]))
    return val_infos
The provided code snippet includes necessary dependencies for implementing the `convert_vid` function. Write a Python function `def convert_vid(VID, ann_dir, save_dir, mode='train')` to solve the following problem:
Convert ImageNet VID dataset in COCO style. Args: VID (dict): The converted COCO style annotations. ann_dir (str): The path of ImageNet VID dataset. save_dir (str): The path to save `VID`. mode (str): Convert train dataset or validation dataset. Options are 'train', 'val'. Default: 'train'.
Here is the function:
def convert_vid(VID, ann_dir, save_dir, mode='train'):
    """Convert ImageNet VID dataset in COCO style.

    Writes ``imagenet_vid_<mode>.json`` into ``save_dir``.

    Args:
        VID (dict): The converted COCO style annotations (mutated in place).
        ann_dir (str): The path of ImageNet VID dataset.
        save_dir (str): The path to save `VID`.
        mode (str): Convert train dataset or validation dataset. Options are
            'train', 'val'. Default: 'train'.
    """
    assert mode in ['train', 'val']
    # Running 1-based counters for video/image/annotation/instance ids.
    records = dict(
        vid_id=1,
        img_id=1,
        ann_id=1,
        global_instance_id=1,
        num_vid_train_frames=0,
        num_no_objects=0)
    obj_num_classes = dict()
    xml_dir = osp.join(ann_dir, 'Annotations/VID/')
    if mode == 'train':
        vid_infos = parse_train_list(ann_dir)
    else:
        vid_infos = parse_val_list(ann_dir)
    for vid_info in tqdm(vid_infos):
        # xml track ids are only unique per video; remap them to a
        # globally unique instance id.
        instance_id_maps = dict()
        vid_train_frames = vid_infos[vid_info].get('vid_train_frames', [])
        records['num_vid_train_frames'] += len(vid_train_frames)
        video = dict(
            id=records['vid_id'],
            name=vid_info,
            vid_train_frames=vid_train_frames)
        VID['videos'].append(video)
        num_frames = vid_infos[vid_info]['num_frames']
        for frame_id in range(num_frames):
            is_vid_train_frame = True if frame_id in vid_train_frames \
                else False
            img_prefix = osp.join(vid_info, '%06d' % frame_id)
            xml_name = osp.join(xml_dir, f'{img_prefix}.xml')
            # parse XML annotation file
            tree = ET.parse(xml_name)
            root = tree.getroot()
            size = root.find('size')
            width = int(size.find('width').text)
            height = int(size.find('height').text)
            image = dict(
                file_name=f'{img_prefix}.JPEG',
                height=height,
                width=width,
                id=records['img_id'],
                frame_id=frame_id,
                video_id=records['vid_id'],
                is_vid_train_frame=is_vid_train_frame)
            VID['images'].append(image)
            # Frames without objects are still recorded, just counted.
            if root.findall('object') == []:
                print(xml_name, 'has no objects.')
                records['num_no_objects'] += 1
                records['img_id'] += 1
                continue
            for obj in root.findall('object'):
                name = obj.find('name').text
                # Drop objects whose class is not in the mapping.
                if name not in cats_id_maps:
                    continue
                category_id = cats_id_maps[name]
                bnd_box = obj.find('bndbox')
                x1, y1, x2, y2 = [
                    int(bnd_box.find('xmin').text),
                    int(bnd_box.find('ymin').text),
                    int(bnd_box.find('xmax').text),
                    int(bnd_box.find('ymax').text)
                ]
                w = x2 - x1
                h = y2 - y1
                track_id = obj.find('trackid').text
                if track_id in instance_id_maps:
                    instance_id = instance_id_maps[track_id]
                else:
                    instance_id = records['global_instance_id']
                    records['global_instance_id'] += 1
                    instance_id_maps[track_id] = instance_id
                occluded = obj.find('occluded').text
                generated = obj.find('generated').text
                ann = dict(
                    id=records['ann_id'],
                    video_id=records['vid_id'],
                    image_id=records['img_id'],
                    category_id=category_id,
                    instance_id=instance_id,
                    bbox=[x1, y1, w, h],
                    area=w * h,
                    iscrowd=False,
                    occluded=occluded == '1',
                    generated=generated == '1')
                if category_id not in obj_num_classes:
                    obj_num_classes[category_id] = 1
                else:
                    obj_num_classes[category_id] += 1
                VID['annotations'].append(ann)
                records['ann_id'] += 1
            records['img_id'] += 1
        records['vid_id'] += 1
    if not osp.isdir(save_dir):
        os.makedirs(save_dir)
    mmcv.dump(VID, osp.join(save_dir, f'imagenet_vid_{mode}.json'))
    print(f'-----ImageNet VID {mode}------')
    print(f'{records["vid_id"]- 1} videos')
    print(f'{records["img_id"]- 1} images')
    print(
        f'{records["num_vid_train_frames"]} train frames for video detection')
    print(f'{records["num_no_objects"]} images have no objects')
    print(f'{records["ann_id"] - 1} objects')
    print('-----------------------')
    # NOTE(review): raises KeyError if some class has zero converted
    # objects -- confirm every class occurs in the split.
    for i in range(1, len(CLASSES) + 1):
        print(f'Class {i} {CLASSES[i - 1]} has {obj_num_classes[i]} objects.')
20,012 | import argparse
import copy
import os
import os.path as osp
from collections import defaultdict
import mmcv
from tqdm import tqdm
def parse_args():
    """Parse command-line options for the YouTube-VIS converter.

    Returns:
        argparse.Namespace: Options with ``input``, ``output`` and
        ``version`` attributes.
    """
    ap = argparse.ArgumentParser(
        description='YouTube-VIS to COCO Video format')
    ap.add_argument(
        '-i',
        '--input',
        help='root directory of YouTube-VIS annotations',
    )
    ap.add_argument(
        '-o',
        '--output',
        help='directory to save coco formatted label file',
    )
    ap.add_argument(
        '--version',
        choices=['2019', '2021'],
        help='The version of YouTube-VIS Dataset',
    )
    return ap.parse_args()
20,013 | import argparse
import copy
import os
import os.path as osp
from collections import defaultdict
import mmcv
from tqdm import tqdm
The provided code snippet includes necessary dependencies for implementing the `convert_vis` function. Write a Python function `def convert_vis(ann_dir, save_dir, dataset_version, mode='train')` to solve the following problem:
Convert YouTube-VIS dataset in COCO style. Args: ann_dir (str): The path of YouTube-VIS dataset. save_dir (str): The path to save `VIS`. dataset_version (str): The version of dataset. Options are '2019', '2021'. mode (str): Convert train dataset or validation dataset or test dataset. Options are 'train', 'valid', 'test'. Default: 'train'.
Here is the function:
def convert_vis(ann_dir, save_dir, dataset_version, mode='train'):
    """Convert YouTube-VIS dataset in COCO style.

    Writes ``youtube_vis_<version>_<mode>.json`` into ``save_dir``.

    Args:
        ann_dir (str): The path of YouTube-VIS dataset.
        save_dir (str): The path to save `VIS`.
        dataset_version (str): The version of dataset. Options are '2019',
            '2021'.
        mode (str): Convert train dataset or validation dataset or test
            dataset. Options are 'train', 'valid', 'test'. Default: 'train'.
    """
    assert dataset_version in ['2019', '2021']
    assert mode in ['train', 'valid', 'test']
    VIS = defaultdict(list)
    # Running 1-based counters for video/image/annotation/instance ids.
    records = dict(vid_id=1, img_id=1, ann_id=1, global_instance_id=1)
    obj_num_classes = dict()
    # The two releases lay out their annotation files differently.
    if dataset_version == '2019':
        official_anns = mmcv.load(osp.join(ann_dir, f'{mode}.json'))
    elif dataset_version == '2021':
        official_anns = mmcv.load(osp.join(ann_dir, mode, 'instances.json'))
    VIS['categories'] = copy.deepcopy(official_anns['categories'])
    # Only the train split ships annotations.
    has_annotations = mode == 'train'
    if has_annotations:
        vid_to_anns = defaultdict(list)
        for ann_info in official_anns['annotations']:
            vid_to_anns[ann_info['video_id']].append(ann_info)
    video_infos = official_anns['videos']
    for video_info in tqdm(video_infos):
        # The video name is the directory component of its first frame.
        video_name = video_info['file_names'][0].split(os.sep)[0]
        video = dict(
            id=video_info['id'],
            name=video_name,
            width=video_info['width'],
            height=video_info['height'])
        VIS['videos'].append(video)
        num_frames = len(video_info['file_names'])
        width = video_info['width']
        height = video_info['height']
        if has_annotations:
            ann_infos_in_video = vid_to_anns[video_info['id']]
            # Official track ids are remapped to globally unique ids.
            instance_id_maps = dict()
        for frame_id in range(num_frames):
            image = dict(
                file_name=video_info['file_names'][frame_id],
                height=height,
                width=width,
                id=records['img_id'],
                frame_id=frame_id,
                video_id=video_info['id'])
            VIS['images'].append(image)
            if has_annotations:
                for ann_info in ann_infos_in_video:
                    # Per-track lists hold one entry per frame; None means
                    # the object is absent in this frame.
                    bbox = ann_info['bboxes'][frame_id]
                    if bbox is None:
                        continue
                    category_id = ann_info['category_id']
                    track_id = ann_info['id']
                    segmentation = ann_info['segmentations'][frame_id]
                    area = ann_info['areas'][frame_id]
                    assert isinstance(category_id, int)
                    assert isinstance(track_id, int)
                    assert segmentation is not None
                    assert area is not None
                    if track_id in instance_id_maps:
                        instance_id = instance_id_maps[track_id]
                    else:
                        instance_id = records['global_instance_id']
                        records['global_instance_id'] += 1
                        instance_id_maps[track_id] = instance_id
                    ann = dict(
                        id=records['ann_id'],
                        video_id=video_info['id'],
                        image_id=records['img_id'],
                        category_id=category_id,
                        instance_id=instance_id,
                        bbox=bbox,
                        segmentation=segmentation,
                        area=area,
                        iscrowd=ann_info['iscrowd'])
                    if category_id not in obj_num_classes:
                        obj_num_classes[category_id] = 1
                    else:
                        obj_num_classes[category_id] += 1
                    VIS['annotations'].append(ann)
                    records['ann_id'] += 1
            records['img_id'] += 1
        records['vid_id'] += 1
    if not osp.isdir(save_dir):
        os.makedirs(save_dir)
    mmcv.dump(VIS,
              osp.join(save_dir, f'youtube_vis_{dataset_version}_{mode}.json'))
    print(f'-----YouTube VIS {dataset_version} {mode}------')
    print(f'{records["vid_id"]- 1} videos')
    print(f'{records["img_id"]- 1} images')
    if has_annotations:
        print(f'{records["ann_id"] - 1} objects')
        print(f'{records["global_instance_id"] - 1} instances')
    print('-----------------------')
    if has_annotations:
        # NOTE(review): raises KeyError if some category has zero objects
        # -- confirm every category occurs in the train split.
        for i in range(1, len(VIS['categories']) + 1):
            class_name = VIS['categories'][i - 1]['name']
            print(f'Class {i} {class_name} has {obj_num_classes[i]} objects.')
20,014 | import argparse
import os
import os.path as osp
from collections import defaultdict
import mmcv
from tqdm import tqdm
def parse_args():
    """Parse command-line options for the LaSOT converter.

    Returns:
        argparse.Namespace: Options with ``input``, ``output`` and
        ``split`` attributes.
    """
    ap = argparse.ArgumentParser(
        description='LaSOT test dataset to COCO Video format')
    ap.add_argument(
        '-i',
        '--input',
        help='root directory of LaSOT test dataset',
    )
    ap.add_argument(
        '-o',
        '--output',
        help='directory to save coco formatted label file',
    )
    ap.add_argument(
        '--split',
        help='the split set of lasot, all denotes the whole dataset',
        choices=['train', 'test', 'all'],
        default='all')
    return ap.parse_args()
20,015 | import argparse
import os
import os.path as osp
from collections import defaultdict
import mmcv
from tqdm import tqdm
The provided code snippet includes necessary dependencies for implementing the `convert_lasot` function. Write a Python function `def convert_lasot(ann_dir, save_dir, split='test')` to solve the following problem:
Convert lasot dataset to COCO style. Args: ann_dir (str): The path of lasot dataset save_dir (str): The path to save `lasot`. split (str): the split ('train' or 'test') of dataset.
Here is the function:
def convert_lasot(ann_dir, save_dir, split='test'):
    """Convert lasot dataset to COCO style.

    Writes ``lasot_<split>.json`` into ``save_dir``.

    Args:
        ann_dir (str): The path of lasot dataset
        save_dir (str): The path to save `lasot`.
        split (str): the split ('train' or 'test') of dataset.
    """
    assert split in ['train', 'test'], f'split [{split}] does not exist'
    lasot = defaultdict(list)
    # Running 1-based counters for video/image/annotation/instance ids.
    records = dict(vid_id=1, img_id=1, ann_id=1, global_instance_id=1)
    # Single-object tracking: one dummy category.
    lasot['categories'] = [dict(id=0, name=0)]
    # 'testing_set.txt' (shipped next to this script) lists the test videos;
    # the train split is everything else.
    videos_list = mmcv.list_from_file(
        osp.join(osp.dirname(__file__), 'testing_set.txt'))
    if split == 'train':
        train_videos_list = []
        for video_class in os.listdir(ann_dir):
            for video_id in os.listdir(osp.join(ann_dir, video_class)):
                if video_id not in videos_list:
                    train_videos_list.append(video_id)
        videos_list = train_videos_list
    for video_name in tqdm(videos_list, desc=split):
        # Video names look like '<class>-<index>', e.g. 'airplane-1'.
        video_class = video_name.split('-')[0]
        video_path = osp.join(ann_dir, video_class, video_name)
        video = dict(id=records['vid_id'], name=video_name)
        lasot['videos'].append(video)
        gt_bboxes = mmcv.list_from_file(
            osp.join(video_path, 'groundtruth.txt'))
        # Per-frame '0'/'1' flags, stored as a single comma-separated line.
        full_occlusion = mmcv.list_from_file(
            osp.join(video_path, 'full_occlusion.txt'))
        full_occlusion = full_occlusion[0].split(',')
        out_of_view = mmcv.list_from_file(
            osp.join(video_path, 'out_of_view.txt'))
        out_of_view = out_of_view[0].split(',')
        # Read the first frame only to get the video resolution.
        img = mmcv.imread(osp.join(video_path, 'img/00000001.jpg'))
        height, width, _ = img.shape
        for frame_id, gt_bbox in enumerate(gt_bboxes):
            file_name = '%08d' % (frame_id + 1) + '.jpg'
            file_name = osp.join(video_class, video_name, 'img', file_name)
            image = dict(
                file_name=file_name,
                height=height,
                width=width,
                id=records['img_id'],
                frame_id=frame_id,
                video_id=records['vid_id'])
            lasot['images'].append(image)
            x1, y1, w, h = gt_bbox.split(',')
            ann = dict(
                id=records['ann_id'],
                video_id=records['vid_id'],
                image_id=records['img_id'],
                instance_id=records['global_instance_id'],
                category_id=0,
                bbox=[int(x1), int(y1), int(w),
                      int(h)],
                area=int(w) * int(h),
                full_occlusion=full_occlusion[frame_id] == '1',
                out_of_view=out_of_view[frame_id] == '1')
            lasot['annotations'].append(ann)
            records['ann_id'] += 1
            records['img_id'] += 1
        records['global_instance_id'] += 1
        records['vid_id'] += 1
    if not osp.isdir(save_dir):
        os.makedirs(save_dir)
    mmcv.dump(lasot, osp.join(save_dir, f'lasot_{split}.json'))
    print(f'-----LaSOT {split} Dataset------')
    print(f'{records["vid_id"]- 1} videos')
    print(f'{records["global_instance_id"]- 1} instances')
    print(f'{records["img_id"]- 1} images')
    print(f'{records["ann_id"] - 1} objects')
    print('-----------------------------')
20,016 | import argparse
import glob
import os
import os.path as osp
import time
import numpy as np
def parse_args():
    """Parse command-line options for the LaSOT info generator.

    Returns:
        argparse.Namespace: Options with ``input``, ``output`` and
        ``split`` attributes.
    """
    ap = argparse.ArgumentParser(
        description='Generate the information of LaSOT dataset')
    ap.add_argument(
        '-i',
        '--input',
        help='root directory of LaSOT dataset',
    )
    ap.add_argument(
        '-o',
        '--output',
        help='directory to save text file',
    )
    ap.add_argument(
        '--split',
        help="the split set of LaSOT, 'all' denotes the whole dataset",
        choices=['train', 'test', 'all'],
        default='all')
    return ap.parse_args()
20,017 | import argparse
import glob
import os
import os.path as osp
import time
import numpy as np
The provided code snippet includes necessary dependencies for implementing the `gen_data_infos` function. Write a Python function `def gen_data_infos(data_root, save_dir, split='train')` to solve the following problem:
Generate dataset information. Args: data_root (str): The path of dataset. save_dir (str): The path to save the information of dataset. split (str): the split ('train' or 'test') of dataset.
Here is the function:
def gen_data_infos(data_root, save_dir, split='train'):
    """Generate dataset information.

    Writes ``lasot_{split}_infos.txt`` into ``save_dir``; each line is
    ``(video_path,annotation_path,start_frame_id,end_frame_id)``.

    Args:
        data_root (str): The path of dataset.
        save_dir (str): The path to save the information of dataset.
        split (str): the split ('train' or 'test') of dataset.
    """
    print(f'Generate the information of {split} set of LaSOT dataset...')
    start_time = time.time()
    assert split in ['train', 'test']
    # The official test split is listed in 'testing_set.txt' shipped next
    # to this script.
    test_videos_list = np.loadtxt(
        osp.join(osp.dirname(__file__), 'testing_set.txt'), dtype=np.str_)
    if split == 'test':
        videos_list = test_videos_list.tolist()
    else:
        # The train split is every video found on disk minus the test set.
        # Video directories are named '<class>-<idx>' under a class folder.
        all_videos_list = glob.glob(data_root + '/*/*-*')
        test_videos = set(test_videos_list)
        videos_list = []
        for x in all_videos_list:
            x = osp.basename(x)
            if x not in test_videos:
                videos_list.append(x)
    if not osp.isdir(save_dir):
        os.makedirs(save_dir, exist_ok=True)
    videos_list = sorted(videos_list)
    with open(osp.join(save_dir, f'lasot_{split}_infos.txt'), 'w') as f:
        f.write('The format of each line in this txt is '
                '(video_path,annotation_path,start_frame_id,end_frame_id)')
        for video_name in videos_list:
            # '<class>-<idx>' -> '<class>/<class>-<idx>'
            video_name = osp.join(video_name.split('-')[0], video_name)
            video_path = osp.join(video_name, 'img')
            ann_path = osp.join(video_name, 'groundtruth.txt')
            img_names = glob.glob(
                osp.join(data_root, video_name, 'img', '*.jpg'))
            # Frames are named '<frame_id>.jpg'; the largest id is the last
            # frame of the sequence.
            end_frame_name = max(
                img_names, key=lambda x: int(osp.basename(x).split('.')[0]))
            end_frame_id = int(osp.basename(end_frame_name).split('.')[0])
            f.write(f'\n{video_path},{ann_path},1,{end_frame_id}')
    print(f'Done! ({time.time()-start_time:.2f} s)')
    print(f'The results are saved in {save_dir}')
print(f'The results are saved in {save_dir}') | Generate dataset information. Args: data_root (str): The path of dataset. save_dir (str): The path to save the information of dataset. split (str): the split ('train' or 'test') of dataset. |
20,018 | import argparse
import glob
import os
import os.path as osp
import time
def parse_args():
    """Parse command-line options for generating VOT dataset infos.

    Returns:
        argparse.Namespace: Parsed options with ``input`` (dataset root),
            ``output`` (directory for the generated text file) and
            ``dataset_type`` (which VOT challenge edition).
    """
    arg_parser = argparse.ArgumentParser(
        description='Generate the information of VOT dataset')
    arg_parser.add_argument(
        '-i', '--input', help='root directory of VOT dataset')
    arg_parser.add_argument(
        '-o', '--output', help='directory to save text file')
    arg_parser.add_argument(
        '--dataset_type',
        help='the type of vot challenge',
        default='vot2018',
        choices=[
            'vot2018', 'vot2018_lt', 'vot2019', 'vot2019_lt', 'vot2019_rgbd',
            'vot2019_rgbt'
        ])
    return arg_parser.parse_args()
20,019 | import argparse
import glob
import os
import os.path as osp
import time
The provided code snippet includes necessary dependencies for implementing the `gen_data_infos` function. Write a Python function `def gen_data_infos(data_root, save_dir, dataset_type='vot2018')` to solve the following problem:
Generate dataset information. Args: data_root (str): The path of dataset. save_dir (str): The path to save the information of dataset.
Here is the function:
def gen_data_infos(data_root, save_dir, dataset_type='vot2018'):
    """Generate dataset information.

    Writes ``{dataset_type}_infos.txt`` into ``save_dir``; each line is
    ``(video_path,annotation_path,start_frame_id,end_frame_id)``.

    Args:
        data_root (str): The path of dataset.
        save_dir (str): The path to save the information of dataset.
        dataset_type (str): Which VOT challenge edition; only used to name
            the output file.
    """
    print('Generate the information of VOT dataset...')
    start_time = time.time()
    # Every sub-directory of '<data_root>/data' is one video sequence.
    videos_list = os.listdir(osp.join(data_root, 'data'))
    videos_list = [
        x for x in videos_list if osp.isdir(osp.join(data_root, 'data', x))
    ]
    if not osp.isdir(save_dir):
        os.makedirs(save_dir, exist_ok=True)
    videos_list = sorted(videos_list)
    with open(osp.join(save_dir, f'{dataset_type}_infos.txt'), 'w') as f:
        f.write('The format of each line in this txt is '
                '(video_path,annotation_path,start_frame_id,end_frame_id)')
        for video_name in videos_list:
            video_path = osp.join('data', video_name, 'color')
            ann_path = osp.join('data', video_name, 'groundtruth.txt')
            img_names = glob.glob(osp.join(data_root, video_path, '*.jpg'))
            # Frames are named '<frame_id>.jpg'; the largest id is the last
            # frame of the sequence.
            end_frame_name = max(
                img_names, key=lambda x: int(osp.basename(x).split('.')[0]))
            end_frame_id = int(osp.basename(end_frame_name).split('.')[0])
            f.write(f'\n{video_path},{ann_path},1,{end_frame_id}')
    print(f'Done! ({time.time()-start_time:.2f} s)')
    print(f'The results are saved in {save_dir}')
20,020 | import argparse
import os
import os.path as osp
import socket
import zipfile
from urllib import error, request
from tqdm import tqdm
VOT_DATASETS = dict(
vot2018='http://data.votchallenge.net/vot2018/main/description.json',
vot2018_lt= # noqa: E251
'http://data.votchallenge.net/vot2018/longterm/description.json',
vot2019='http://data.votchallenge.net/vot2019/main/description.json',
vot2019_lt= # noqa: E251
'http://data.votchallenge.net/vot2019/longterm/description.json',
vot2019_rgbd='http://data.votchallenge.net/vot2019/rgbd/description.json',
vot2019_rgbt= # noqa: E251
'http://data.votchallenge.net/vot2019/rgbtir/meta/description.json',
vot2020='https://data.votchallenge.net/vot2020/shortterm/description.json',
vot2020_rgbt= # noqa: E251
'http://data.votchallenge.net/vot2020/rgbtir/meta/description.json',
vot2021='https://data.votchallenge.net/vot2021/shortterm/description.json')
def download_url(url, saved_file):
    """Download ``url`` into ``saved_file``.

    NOTE(review): the body of this function is missing in this chunk
    (likely lost during extraction). It is called below to fetch each
    channel zip archive — restore the implementation before use.
    """
def download_dataset(dataset_name, path):
    # Download every sequence of the given VOT challenge edition into
    # ``path``: annotations plus one zip per channel, which is extracted
    # in place and removed afterwards.
    url = VOT_DATASETS[dataset_name]
    # presumably download_json/get_base_url/join_url/download_uncompress/
    # extract_files are helpers defined elsewhere in this file — not
    # visible in this chunk.
    meta = download_json(url)
    base_url = get_base_url(url) + '/'
    for sequence in tqdm(meta['sequences']):
        sequence_directory = os.path.join(path, sequence['name'])
        os.makedirs(sequence_directory, exist_ok=True)
        annotations_url = join_url(base_url, sequence['annotations']['url'])
        download_uncompress(annotations_url, sequence_directory)
        for cname, channel in sequence['channels'].items():
            channel_directory = os.path.join(sequence_directory, cname)
            os.makedirs(channel_directory, exist_ok=True)
            channel_url = join_url(base_url, channel['url'])
            tmp_zip = osp.join(channel_directory, f'{sequence["name"]}.zip')
            download_url(channel_url, tmp_zip)
            try:
                extract_files(tmp_zip, channel_directory)
            except zipfile.BadZipFile:
                # Corrupted/partial download: keep going and ask the user
                # to fetch this sequence manually.
                print(f'[Error]: Please download {sequence["name"]} video \
                    manually through the {channel_url}')
            os.remove(tmp_zip)
20,021 | import argparse
import os
import os.path as osp
from collections import defaultdict
import cv2
import mmcv
import numpy as np
from tqdm import tqdm
def parse_args():
    """Parse command-line options for converting VOT to COCO video format.

    Returns:
        argparse.Namespace: Parsed options with ``input``, ``output`` and
            ``dataset_type``.
    """
    parser = argparse.ArgumentParser(
        description='VOT dataset to COCO Video format')
    parser.add_argument(
        '-i',
        '--input',
        help='root directory of VOT dataset',
    )
    parser.add_argument(
        '-o',
        '--output',
        help='directory to save coco formatted label file',
    ),
    # NOTE(review): the trailing comma above turns the statement into a
    # 1-tuple expression; harmless, but probably unintended.
    parser.add_argument(
        '--dataset_type',
        help='the type of vot challenge',
        default='vot2018',
        choices=[
            'vot2018', 'vot2018_lt', 'vot2019', 'vot2019_lt', 'vot2019_rgbd',
            'vot2019_rgbt'
        ])
    return parser.parse_args()
20,022 | import argparse
import os
import os.path as osp
from collections import defaultdict
import cv2
import mmcv
import numpy as np
from tqdm import tqdm
def parse_attribute(video_path, attr_name, img_num):
    """Parse a per-frame attribute file of one video in VOT.

    Args:
        video_path (str): The path of the video directory.
        attr_name (str): The name of the video's attribute
            (e.g. 'occlusion'); the tags are read from
            ``<video_path>/<attr_name>.tag``.
        img_num (int): The number of frames in the video.
            (Was mis-documented as ``str``; it is used in integer
            arithmetic below.)

    Returns:
        list[str]: One tag per frame ('0' or '1'). Frames without an
            explicit tag — including the case where the tag file is
            missing entirely — get the default tag '0' (unspecified).
    """
    attr_path = osp.join(video_path, attr_name + '.tag')
    if osp.isfile(attr_path):
        attr_list = mmcv.list_from_file(attr_path)
    else:
        attr_list = []
    # Tag files may be shorter than the video; pad the missing frames
    # with the default tag '0' (unspecified).
    attr_list += ['0'] * (img_num - len(attr_list))
    return attr_list
The provided code snippet includes necessary dependencies for implementing the `convert_vot` function. Write a Python function `def convert_vot(ann_dir, save_dir, dataset_type)` to solve the following problem:
Convert vot dataset to COCO style. Args: ann_dir (str): The path of vot dataset save_dir (str): The path to save `vot`. dataset_type (str): The type of vot challenge.
Here is the function:
def convert_vot(ann_dir, save_dir, dataset_type):
    """Convert vot dataset to COCO style.

    Writes ``{dataset_type}.json`` into ``save_dir`` with COCO-video style
    ``videos``/``images``/``annotations``/``categories`` lists.

    Args:
        ann_dir (str): The path of vot dataset
        save_dir (str): The path to save `vot`.
        dataset_type (str): The type of vot challenge.
    """
    vot = defaultdict(list)
    # Running 1-based counters shared across all videos.
    records = dict(vid_id=1, img_id=1, ann_id=1, global_instance_id=1)
    # Single-object tracking: one dummy category.
    vot['categories'] = [dict(id=0, name=0)]
    videos_list = os.listdir(osp.join(ann_dir, 'data'))
    videos_list = [
        x for x in videos_list if osp.isdir(osp.join(ann_dir, 'data', x))
    ]
    for video_name in tqdm(videos_list):
        video = dict(id=records['vid_id'], name=video_name)
        vot['videos'].append(video)
        video_path = osp.join(ann_dir, 'data', video_name)
        ann_file = osp.join(video_path, 'groundtruth.txt')
        gt_anns = mmcv.list_from_file(ann_file)
        # Per-frame challenge tags; missing tag files default to all '0'.
        camera_motion = parse_attribute(video_path, 'camera_motion',
                                        len(gt_anns))
        illustration_change = parse_attribute(video_path, 'illu_change',
                                              len(gt_anns))
        motion_change = parse_attribute(video_path, 'motion_change',
                                        len(gt_anns))
        occlusion = parse_attribute(video_path, 'occlusion', len(gt_anns))
        size_change = parse_attribute(video_path, 'size_change', len(gt_anns))
        # All frames of a sequence share the resolution of its first frame.
        img = mmcv.imread(osp.join(video_path, 'color', '00000001.jpg'))
        height, width, _ = img.shape
        for frame_id, gt_anno in enumerate(gt_anns):
            file_name = '%08d' % (frame_id + 1) + '.jpg'
            file_name = osp.join(video_name, 'color', file_name)
            image = dict(
                file_name=file_name,
                height=height,
                width=width,
                id=records['img_id'],
                frame_id=frame_id,
                video_id=records['vid_id'])
            vot['images'].append(image)
            ann = dict(
                id=records['ann_id'],
                video_id=records['vid_id'],
                image_id=records['img_id'],
                instance_id=records['global_instance_id'],
                category_id=0,
                camera_motion=camera_motion[frame_id] == '1',
                illustration_change=illustration_change[frame_id] == '1',
                motion_change=motion_change[frame_id] == '1',
                occlusion=occlusion[frame_id] == '1',
                size_change=size_change[frame_id] == '1')
            anno = gt_anno.split(',')
            # TODO support mask annotations after VOT2019
            if anno[0][0] == 'm':
                continue
            else:
                # bbox is in [x1, y1, x2, y2, x3, y3, x4, y4] format
                bbox = list(map(lambda x: float(x), anno))
                if len(bbox) == 4:
                    # Axis-aligned [x, y, w, h] boxes are expanded to the
                    # 4-corner polygon representation used above.
                    bbox = [
                        bbox[0], bbox[1], bbox[0] + bbox[2], bbox[1],
                        bbox[0] + bbox[2], bbox[1] + bbox[3], bbox[0],
                        bbox[1] + bbox[3]
                    ]
                assert len(bbox) == 8
                ann['bbox'] = bbox
                # Area of the (possibly rotated) quadrilateral.
                ann['area'] = cv2.contourArea(
                    np.array(bbox, dtype='int').reshape(4, 2))
                vot['annotations'].append(ann)
                records['ann_id'] += 1
            records['img_id'] += 1
        records['global_instance_id'] += 1
        records['vid_id'] += 1
    if not osp.isdir(save_dir):
        os.makedirs(save_dir)
    mmcv.dump(vot, osp.join(save_dir, f'{dataset_type}.json'))
    print(f'-----VOT Challenge {dataset_type} Dataset------')
    print(f'{records["vid_id"]- 1} videos')
    print(f'{records["global_instance_id"]- 1} instances')
    print(f'{records["img_id"]- 1} images')
    print(f'{records["ann_id"] - 1} objects')
    print('-----------------------------')
20,023 | import argparse
import os
import numpy as np
import torch
from mmcv import Config, DictAction, get_logger, print_log
from mmcv.cnn import fuse_conv_bn
from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
from mmcv.runner import (get_dist_info, init_dist, load_checkpoint,
wrap_fp16_model)
from mmdet.datasets import build_dataset
def parse_range(range_str):
    """Parse a ``'start,stop,step'`` string into an array of values.

    Intended for use as an ``argparse`` ``type=`` callable.

    Args:
        range_str (str): Comma-separated ``'start,stop,step'``.

    Returns:
        np.ndarray: Values in ``[start, stop)`` spaced by ``step``,
            rounded to 2 decimals.

    Raises:
        ValueError: If the string does not have exactly three fields or
            ``stop < start``. ``ValueError`` (instead of the previous
            ``assert``) survives ``python -O`` and makes ``argparse``
            report a proper usage error rather than crash with an
            ``AssertionError``.
    """
    range_list = range_str.split(',')
    if len(range_list) != 3:
        raise ValueError(
            f"expected 'start,stop,step' but got {range_str!r}")
    start, stop, step = map(float, range_list)
    if stop < start:
        raise ValueError('stop must be greater than or equal to start')
    return np.round(np.arange(start, stop, step), decimals=2)
def parse_args():
    """Parse command-line options for SiamRPN++ hyper-parameter search.

    Returns:
        argparse.Namespace: Parsed options; the three ``*-range`` options
            are converted by ``parse_range`` into arrays of candidates.
    """
    parser = argparse.ArgumentParser(description='mmtrack test model')
    parser.add_argument('config', help='test config file path')
    parser.add_argument('--checkpoint', help='checkpoint file')
    parser.add_argument(
        '--penalty-k-range',
        type=parse_range,
        help="the range of hyper-parameter 'penalty_k' in SiamRPN++; the format \
        is 'start,stop,step'")
    parser.add_argument(
        '--lr-range',
        type=parse_range,
        help="the range of hyper-parameter 'lr' in SiamRPN++; the format is \
        'start,stop,step'")
    parser.add_argument(
        '--win-influ-range',
        type=parse_range,
        help="the range of hyper-parameter 'window_influence' in SiamRPN++; the \
        format is 'start,stop,step'")
    parser.add_argument(
        '--fuse-conv-bn',
        action='store_true',
        help='Whether to fuse conv and bn, this will slightly increase'
        'the inference speed')
    parser.add_argument('--log', help='log file', default=None)
    parser.add_argument('--eval', type=str, nargs='+', help='eval types')
    parser.add_argument('--show', action='store_true', help='show results')
    parser.add_argument(
        '--show-score-thr',
        type=float,
        default=0.3,
        help='score threshold (default: 0.3)')
    parser.add_argument(
        '--show-dir', help='directory where painted images will be saved')
    parser.add_argument(
        '--gpu-collect',
        action='store_true',
        help='whether to use gpu to collect results.')
    parser.add_argument(
        '--tmpdir',
        help='tmp directory used for collecting results from multiple '
        'workers, available when gpu-collect is not specified')
    parser.add_argument(
        '--cfg-options',
        nargs='+',
        action=DictAction,
        help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into config file.')
    parser.add_argument(
        '--eval-options',
        nargs='+',
        action=DictAction,
        help='custom options for evaluation, the key-value pair in xxx=yyy '
        'format will be kwargs for dataset.evaluate() function')
    parser.add_argument(
        '--launcher',
        choices=['none', 'pytorch', 'slurm', 'mpi'],
        default='none',
        help='job launcher')
    parser.add_argument('--local_rank', type=int, default=0)
    args = parser.parse_args()
    # Propagate the rank to the environment for distributed launchers that
    # only pass it on the command line.
    if 'LOCAL_RANK' not in os.environ:
        os.environ['LOCAL_RANK'] = str(args.local_rank)
    return args
20,024 | import argparse
import os
import os.path as osp
import mmcv
import motmetrics as mm
import numpy as np
from mmcv import Config
from mmcv.utils import print_log
from mmtrack.core.utils import imshow_mot_errors
from mmtrack.datasets import build_dataset
def parse_args():
    """Parse command-line options for visualizing MOT errors.

    Returns:
        argparse.Namespace: Parsed options (config path, result file,
            output directory, display/FPS/backend settings).
    """
    arg_parser = argparse.ArgumentParser(
        description='visualize errors for multiple object tracking')
    arg_parser.add_argument('config', help='path of the config file')
    arg_parser.add_argument(
        '--result-file', help='path of the inference result')
    arg_parser.add_argument(
        '--out-dir',
        help='directory where painted images or videos will be saved')
    arg_parser.add_argument(
        '--show', action='store_true',
        help='whether to show the results on the fly')
    arg_parser.add_argument(
        '--fps', type=int, default=3, help='FPS of the output video')
    arg_parser.add_argument(
        '--backend', type=str, choices=['cv2', 'plt'], default='cv2',
        help='backend of visualization')
    return arg_parser.parse_args()
20,025 | import argparse
import os
import os.path as osp
import mmcv
import motmetrics as mm
import numpy as np
from mmcv import Config
from mmcv.utils import print_log
from mmtrack.core.utils import imshow_mot_errors
from mmtrack.datasets import build_dataset
The provided code snippet includes necessary dependencies for implementing the `compare_res_gts` function. Write a Python function `def compare_res_gts(resfiles, dataset, video_name)` to solve the following problem:
Evaluate the results of the video. Args: resfiles (dict): A dict containing the directory of the MOT results. dataset (Dataset): MOT dataset of the video to be evaluated. video_name (str): Name of the video to be evaluated. Returns: tuple: (acc, res, gt), acc contains the results of MOT metrics, res is the results of inference and gt is the ground truth.
Here is the function:
def compare_res_gts(resfiles, dataset, video_name):
    """Evaluate the results of the video.

    Args:
        resfiles (dict): A dict containing the directory of the MOT results.
        dataset (Dataset): MOT dataset of the video to be evaluated.
        video_name (str): Name of the video to be evaluated.

    Returns:
        tuple: (acc, res, gt), acc contains the results of MOT metrics,
            res is the results of inference and gt is the ground truth.
    """
    # MMTracking supports splitting each training sequence in half; pick
    # the ground-truth file matching the split encoded in the ann_file name.
    if 'half-train' in dataset.ann_file:
        gt_file = osp.join(dataset.img_prefix,
                           f'{video_name}/gt/gt_half-train.txt')
    elif 'half-val' in dataset.ann_file:
        gt_file = osp.join(dataset.img_prefix,
                           f'{video_name}/gt/gt_half-val.txt')
    else:
        gt_file = osp.join(dataset.img_prefix, f'{video_name}/gt/gt.txt')
    res_file = osp.join(resfiles['track'], f'{video_name}.txt')
    gt = mm.io.loadtxt(gt_file)
    res = mm.io.loadtxt(res_file)
    # When the MOT Challenge seqinfo.ini is present, use the official
    # CLEAR-MOT evaluation that respects sequence metadata.
    ini_file = osp.join(dataset.img_prefix, f'{video_name}/seqinfo.ini')
    if osp.exists(ini_file):
        acc, ana = mm.utils.CLEAR_MOT_M(gt, res, ini_file)
    else:
        acc = mm.utils.compare_to_groundtruth(gt, res)
    return acc, res, gt
20,026 | import argparse
import os
import os.path as osp
import mmcv
def parse_args():
    """Parse command-line options for making dummy MOT Challenge results.

    Returns:
        argparse.Namespace: Parsed options with ``json_file`` and
            ``out_folder``.
    """
    cli = argparse.ArgumentParser(
        description='Make dummy results for MOT Challenge.')
    cli.add_argument('json_file', help='Input JSON file.')
    cli.add_argument('out_folder', help='Output folder.')
    return cli.parse_args()
20,027 | import argparse
import os
from itertools import product
import mmcv
import torch
from dotty_dict import dotty
from mmcv import Config, DictAction, get_logger, print_log
from mmcv.cnn import fuse_conv_bn
from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
from mmcv.runner import (get_dist_info, init_dist, load_checkpoint,
wrap_fp16_model)
from mmdet.datasets import build_dataset
from mmtrack.models import build_tracker
def parse_args():
    """Parse command-line options for the mmtrack test/search script.

    Returns:
        argparse.Namespace: Parsed options.
    """
    parser = argparse.ArgumentParser(description='mmtrack test model')
    parser.add_argument('config', help='test config file path')
    parser.add_argument('--checkpoint', help='checkpoint file')
    parser.add_argument('--out', help='output result file')
    parser.add_argument('--log', help='log file')
    parser.add_argument(
        '--fuse-conv-bn',
        action='store_true',
        help='Whether to fuse conv and bn, this will slightly increase'
        'the inference speed')
    parser.add_argument(
        '--format-only',
        action='store_true',
        help='Format the output results without perform evaluation. It is'
        'useful when you want to format the result to a specific format and '
        'submit it to the test server')
    parser.add_argument('--eval', type=str, nargs='+', help='eval types')
    parser.add_argument('--show', action='store_true', help='show results')
    parser.add_argument(
        '--show-dir', help='directory where painted images will be saved')
    parser.add_argument(
        '--gpu-collect',
        action='store_true',
        help='whether to use gpu to collect results.')
    parser.add_argument(
        '--tmpdir',
        help='tmp directory used for collecting results from multiple '
        'workers, available when gpu-collect is not specified')
    parser.add_argument(
        '--cfg-options',
        nargs='+',
        action=DictAction,
        help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into config file.')
    parser.add_argument(
        '--eval-options',
        nargs='+',
        action=DictAction,
        help='custom options for evaluation, the key-value pair in xxx=yyy '
        'format will be kwargs for dataset.evaluate() function')
    parser.add_argument(
        '--launcher',
        choices=['none', 'pytorch', 'slurm', 'mpi'],
        default='none',
        help='job launcher')
    parser.add_argument('--local_rank', type=int, default=0)
    args = parser.parse_args()
    # Propagate the rank to the environment for distributed launchers that
    # only pass it on the command line.
    if 'LOCAL_RANK' not in os.environ:
        os.environ['LOCAL_RANK'] = str(args.local_rank)
    return args
20,028 | import argparse
import os
from itertools import product
import mmcv
import torch
from dotty_dict import dotty
from mmcv import Config, DictAction, get_logger, print_log
from mmcv.cnn import fuse_conv_bn
from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
from mmcv.runner import (get_dist_info, init_dist, load_checkpoint,
wrap_fp16_model)
from mmdet.datasets import build_dataset
from mmtrack.models import build_tracker
def get_search_params(cfg, search_params=None, prefix=None, logger=None):
    """Recursively collect every list-valued entry of a nested config.

    Args:
        cfg (dict): (Possibly nested) configuration mapping.
        search_params (dict | None): Accumulator; created when None.
        prefix (str | None): Dotted key path of the enclosing dicts.
        logger: Logger passed through to ``print_log``.

    Returns:
        dict: Mapping from dotted key path to the candidate-value list.
    """
    if search_params is None:
        search_params = {}
    for key, value in cfg.items():
        full_key = key if prefix is None else prefix + '.' + key
        if isinstance(value, list):
            print_log(f'search `{full_key}` in {value}.', logger)
            search_params[full_key] = value
        if isinstance(value, dict):
            search_params = get_search_params(value, search_params, full_key,
                                              logger)
    return search_params
20,029 | import argparse
import glob
import os.path as osp
import subprocess
import torch
def parse_args():
    """Parse command-line options for publishing a checkpoint.

    Returns:
        argparse.Namespace: Parsed options with ``in_file`` and
            ``out_file``.
    """
    cli = argparse.ArgumentParser(
        description='Process a checkpoint to be published')
    cli.add_argument('in_file', help='input checkpoint filename')
    cli.add_argument('out_file', help='output checkpoint filename')
    return cli.parse_args()
20,030 | import argparse
import glob
import os.path as osp
import subprocess
import torch
def process_checkpoint(in_file, out_file):
    """Strip and publish a checkpoint.

    Removes the optimizer state, saves the slimmed checkpoint, then renames
    it to ``<out_file>_<train-time>-<sha256 prefix>.pth`` and copies the
    matching training log json next to it.

    Args:
        in_file (str): input checkpoint filename
        out_file (str): output checkpoint filename
    """
    exp_dir = osp.dirname(in_file)
    # The newest *.log.json in the experiment dir identifies the training
    # run; its basename is the run's timestamp.
    log_json_path = list(sorted(glob.glob(osp.join(exp_dir,
                                                   '*.log.json'))))[-1]
    model_time = osp.split(log_json_path)[-1].split('.')[0]
    checkpoint = torch.load(in_file, map_location='cpu')
    # remove optimizer for smaller file size
    if 'optimizer' in checkpoint:
        del checkpoint['optimizer']
    # if it is necessary to remove some sensitive data in checkpoint['meta'],
    # add the code here.
    if torch.__version__ >= '1.6':
        # Keep the legacy serialization format so older torch can load it.
        torch.save(checkpoint, out_file, _use_new_zipfile_serialization=False)
    else:
        torch.save(checkpoint, out_file)
    sha = subprocess.check_output(['sha256sum', out_file]).decode()
    if out_file.endswith('.pth'):
        out_file_name = out_file[:-4]
    else:
        out_file_name = out_file
    final_file = out_file_name + f'_{model_time}' + f'-{sha[:8]}.pth'
    subprocess.Popen(['mv', out_file, final_file])
    cp_log_json_path = out_file_name + f'_{osp.basename(log_json_path)}'
    subprocess.Popen(['cp', log_json_path, cp_log_json_path])
20,031 | import argparse
import json
from collections import defaultdict
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
def cal_train_time(log_dicts, args):
    """Print per-epoch iteration-time statistics for each training log.

    Args:
        log_dicts (list[dict]): One dict per log file, mapping epoch to a
            dict of metric lists (must contain a 'time' list).
        args: Namespace with ``json_logs`` (paths, parallel to
            ``log_dicts``) and ``include_outliers`` (bool).
    """
    for idx, log_dict in enumerate(log_dicts):
        print(f'{"-" * 5}Analyze train time of {args.json_logs[idx]}{"-" * 5}')
        if args.include_outliers:
            times = [log_dict[epoch]['time'] for epoch in log_dict.keys()]
        else:
            # Drop each epoch's first iteration: it typically includes
            # warm-up overhead and would skew the statistics.
            times = [log_dict[epoch]['time'][1:] for epoch in log_dict.keys()]
        all_times = np.array(times)
        epoch_means = all_times.mean(-1)
        slowest = epoch_means.argmax()
        fastest = epoch_means.argmin()
        print(f'slowest epoch {slowest + 1}, '
              f'average time is {epoch_means[slowest]:.4f}')
        print(f'fastest epoch {fastest + 1}, '
              f'average time is {epoch_means[fastest]:.4f}')
        print(f'time std over epochs is {epoch_means.std():.4f}')
        print(f'average iter time: {np.mean(all_times):.4f} s/iter')
        print()
20,032 | import argparse
import json
from collections import defaultdict
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
def plot_curve(log_dicts, args):
    """Plot the requested metric curves from parsed training logs.

    Args:
        log_dicts (list[dict]): One dict per log file (epoch -> metrics).
        args: Namespace with ``json_logs``, ``keys``, ``legend``,
            ``backend``, ``style``, ``title`` and ``out``.

    Raises:
        KeyError: If a requested metric is missing from a log.
    """
    if args.backend is not None:
        plt.switch_backend(args.backend)
    sns.set_style(args.style)
    # if legend is None, use (unknown)_{key} as legend
    legend = args.legend
    if legend is None:
        legend = []
        for json_log in args.json_logs:
            for metric in args.keys:
                legend.append(f'{json_log}_{metric}')
    assert len(legend) == (len(args.json_logs) * len(args.keys))
    metrics = args.keys
    num_metrics = len(metrics)
    for i, log_dict in enumerate(log_dicts):
        epochs = list(log_dict.keys())
        for j, metric in enumerate(metrics):
            print(f'plot curve of {args.json_logs[i]}, metric is {metric}')
            if metric not in log_dict[epochs[0]]:
                raise KeyError(
                    f'{args.json_logs[i]} does not contain metric {metric}')
            if 'mAP' in metric:
                # mAP is evaluated once per epoch -> x axis in epochs.
                xs = np.arange(1, max(epochs) + 1)
                ys = []
                for epoch in epochs:
                    ys += log_dict[epoch][metric]
                ax = plt.gca()
                ax.set_xticks(xs)
                plt.xlabel('epoch')
                plt.plot(xs, ys, label=legend[i * num_metrics + j], marker='o')
            else:
                # Other metrics are logged per iteration -> x axis in iters.
                xs = []
                ys = []
                num_iters_per_epoch = log_dict[epochs[0]]['iter'][-1]
                for epoch in epochs:
                    iters = log_dict[epoch]['iter']
                    # The trailing 'val' entry has no train metric value.
                    if log_dict[epoch]['mode'][-1] == 'val':
                        iters = iters[:-1]
                    xs.append(
                        np.array(iters) + (epoch - 1) * num_iters_per_epoch)
                    ys.append(np.array(log_dict[epoch][metric][:len(iters)]))
                xs = np.concatenate(xs)
                ys = np.concatenate(ys)
                plt.xlabel('iter')
                plt.plot(
                    xs, ys, label=legend[i * num_metrics + j], linewidth=0.5)
        plt.legend()
        if args.title is not None:
            plt.title(args.title)
    if args.out is None:
        plt.show()
    else:
        print(f'save curve to: {args.out}')
        plt.savefig(args.out)
        plt.cla()
20,033 | import argparse
import json
from collections import defaultdict
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
def add_plot_parser(subparsers):
    """Register the ``plot_curve`` sub-command on *subparsers*."""
    plot_parser = subparsers.add_parser(
        'plot_curve', help='parser for plotting curves')
    plot_parser.add_argument(
        'json_logs', type=str, nargs='+',
        help='path of train log in json format')
    plot_parser.add_argument(
        '--keys', type=str, nargs='+', default=['bbox_mAP'],
        help='the metric that you want to plot')
    plot_parser.add_argument('--title', type=str, help='title of figure')
    plot_parser.add_argument(
        '--legend', type=str, nargs='+', default=None,
        help='legend of each plot')
    plot_parser.add_argument(
        '--backend', type=str, default=None, help='backend of plt')
    plot_parser.add_argument(
        '--style', type=str, default='dark', help='style of plt')
    plot_parser.add_argument('--out', type=str, default=None)
def add_time_parser(subparsers):
    """Register the ``cal_train_time`` sub-command on *subparsers*."""
    time_parser = subparsers.add_parser(
        'cal_train_time',
        help='parser for computing the average time per training iteration')
    time_parser.add_argument(
        'json_logs', type=str, nargs='+',
        help='path of train log in json format')
    time_parser.add_argument(
        '--include-outliers', action='store_true',
        help='include the first value of every epoch when computing '
        'the average time')
def parse_args():
    """Parse the top-level command line and dispatch to a sub-command.

    Returns:
        argparse.Namespace: Parsed options; ``task`` selects the
            sub-command ('plot_curve' or 'cal_train_time').
    """
    root_parser = argparse.ArgumentParser(description='Analyze Json Log')
    # currently only support plot curve and calculate average train time
    task_parsers = root_parser.add_subparsers(dest='task', help='task parser')
    add_plot_parser(task_parsers)
    add_time_parser(task_parsers)
    return root_parser.parse_args()
20,034 | import argparse
import json
from collections import defaultdict
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
def load_json_logs(json_logs):
    """Convert json training logs into per-epoch metric dicts.

    Args:
        json_logs (list[str]): Paths of json log files; each line of a
            file is one json record.

    Returns:
        list[dict]: One dict per input file, mapping epoch number to a
            ``defaultdict(list)`` of metric values over all iterations.
            Lines without an ``epoch`` field are skipped.
    """
    log_dicts = []
    for json_log in json_logs:
        log_dict = {}
        with open(json_log, 'r') as log_file:
            for line in log_file:
                log = json.loads(line.strip())
                # skip lines without `epoch` field
                if 'epoch' not in log:
                    continue
                epoch = log.pop('epoch')
                epoch_metrics = log_dict.setdefault(epoch, defaultdict(list))
                for k, v in log.items():
                    epoch_metrics[k].append(v)
        log_dicts.append(log_dict)
    return log_dicts
20,035 | import argparse
import time
import torch
from mmcv import Config
from mmcv.cnn import fuse_conv_bn
from mmcv.parallel import MMDataParallel
from mmcv.runner import load_checkpoint, wrap_fp16_model
from mmdet.datasets import replace_ImageToTensor
from mmtrack.datasets import build_dataloader, build_dataset
from mmtrack.models import build_model
def parse_args():
    """Parse command-line options for benchmarking a model.

    Returns:
        argparse.Namespace: Parsed options (config, checkpoint,
            log_interval, fuse_conv_bn).
    """
    parser = argparse.ArgumentParser(description='MMTrack benchmark a model')
    parser.add_argument('config', help='test config file path')
    parser.add_argument('--checkpoint', help='checkpoint file')
    # type=int added: without it a user-supplied value arrived as a str
    # (only the default was an int), breaking arithmetic on the interval.
    parser.add_argument(
        '--log-interval', type=int, default=50, help='interval of logging')
    parser.add_argument(
        '--fuse-conv-bn',
        action='store_true',
        help='Whether to fuse conv and bn, this will slightly increase'
        'the inference speed')
    args = parser.parse_args()
    return args
20,036 | import argparse
from mmcv import Config, DictAction
def parse_args():
    """Parse command-line options for printing a whole config.

    Returns:
        argparse.Namespace: Parsed options with ``config`` and the
            optional ``options`` overrides.
    """
    cli = argparse.ArgumentParser(description='Print the whole config')
    cli.add_argument('config', help='config file path')
    cli.add_argument(
        '--options', nargs='+', action=DictAction, help='arguments in dict')
    return cli.parse_args()
20,037 | import os
from setuptools import setup, find_packages
import versioneer
def read_file(fname):
    """Return the full text content of *fname*."""
    with open(fname, 'r') as handle:
        return handle.read()
20,046 | import logging
import os
import sys
from gooey import Gooey, GooeyParser
from ffsubsync.constants import (
RELEASE_URL,
WEBSITE,
DEV_WEBSITE,
DESCRIPTION,
LONG_DESCRIPTION,
PROJECT_NAME,
PROJECT_LICENSE,
COPYRIGHT_YEAR,
SUBSYNC_RESOURCES_ENV_MAGIC,
)
from ffsubsync.ffsubsync import run, add_cli_only_args
from ffsubsync.version import get_version, update_available
DESCRIPTION: str = "Synchronize subtitles with video."
def update_available():
    """Check GitHub whether a newer FFsubsync release exists.

    Returns:
        bool: True when the latest published tag is newer than the
            installed version; False on timeout, missing tag, or a
            non-OK HTTP response (best-effort check, never raises for
            those cases).
    """
    import requests
    from requests.exceptions import Timeout
    from .constants import API_RELEASE_URL
    try:
        # Short timeout: this runs at GUI startup and must not block it.
        resp = requests.get(API_RELEASE_URL, timeout=1)
        latest_vstr = resp.json()["tag_name"]
    except Timeout:
        return False
    except KeyError:
        return False
    if not resp.ok:
        return False
    # presumably make_version_tuple/get_version are imported elsewhere in
    # this module — not visible in this chunk.
    return make_version_tuple(get_version()) < make_version_tuple(latest_vstr)
def make_parser():
    """Build the GooeyParser for the FFsubsync GUI.

    Returns:
        GooeyParser: Parser with a 'Basic' group (reference, srtin,
            srtout) and an 'Advanced' group of optional flags. The
            description gets an update notice appended when a newer
            release is available.
    """
    description = DESCRIPTION
    if update_available():
        description += (
            "\nUpdate available! Please go to "
            '"File" -> "Download latest release"'
            " to update FFsubsync."
        )
    parser = GooeyParser(description=description)
    main_group = parser.add_argument_group("Basic")
    main_group.add_argument(
        "reference",
        help="Reference (video or subtitles file) to which to synchronize input subtitles.",
        widget="FileChooser",
    )
    main_group.add_argument("srtin", help="Input subtitles file", widget="FileChooser")
    main_group.add_argument(
        "-o",
        "--srtout",
        help="Output subtitles file (default=${srtin}.synced.srt).",
        widget="FileSaver",
    )
    advanced_group = parser.add_argument_group("Advanced")
    # TODO: these are shared between gui and cli; don't duplicate this code
    advanced_group.add_argument(
        "--merge-with-reference",
        "--merge",
        action="store_true",
        help="Merge reference subtitles with synced output subtitles.",
    )
    advanced_group.add_argument(
        "--make-test-case",
        "--create-test-case",
        action="store_true",
        help="If specified, create a test archive a few KiB in size "
        "to send to the developer as a debugging aid.",
    )
    advanced_group.add_argument(
        "--reference-stream",
        "--refstream",
        "--reference-track",
        "--reftrack",
        default=None,
        help="Which stream/track in the video file to use as reference, "
        "formatted according to ffmpeg conventions. For example, s:0 "
        "uses the first subtitle track; a:3 would use the fourth audio track.",
    )
    return parser
20,047 | import errno
import os
import re
import subprocess
import sys
HANDLERS = {}
The provided code snippet includes necessary dependencies for implementing the `register_vcs_handler` function. Write a Python function `def register_vcs_handler(vcs, method)` to solve the following problem:
Decorator to mark a method as the handler for a particular VCS.
Here is the function:
def register_vcs_handler(vcs, method):  # decorator
    """Decorator to mark a method as the handler for a particular VCS."""
    def decorate(func):
        """Store func in HANDLERS[vcs][method]."""
        HANDLERS.setdefault(vcs, {})[method] = func
        return func
    return decorate
20,048 | import errno
import os
import re
import subprocess
import sys
The provided code snippet includes necessary dependencies for implementing the `run_command` function. Write a Python function `def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, env=None)` to solve the following problem:
Call the given command(s).
Here is the function:
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
                env=None):
    """Run the first launchable command from ``commands`` with ``args``.

    Each candidate in ``commands`` is tried in order; candidates that do
    not exist (ENOENT) are skipped.  Returns a ``(stdout, returncode)``
    pair: ``(None, None)`` when nothing could be started, and
    ``(None, returncode)`` when the process exited non-zero.
    """
    assert isinstance(commands, list)
    process = None
    for candidate in commands:
        display = str([candidate] + args)
        try:
            # remember shell=False, so use git.cmd on windows, not just git
            process = subprocess.Popen(
                [candidate] + args,
                cwd=cwd,
                env=env,
                stdout=subprocess.PIPE,
                stderr=(subprocess.PIPE if hide_stderr else None),
            )
        except EnvironmentError as err:
            if err.errno == errno.ENOENT:
                # This candidate is not installed; try the next one.
                continue
            if verbose:
                print("unable to run %s" % display)
                print(err)
            return None, None
        break
    else:
        # Loop exhausted without a successful Popen.
        if verbose:
            print("unable to find command, tried %s" % (commands,))
        return None, None
    stdout = process.communicate()[0].strip()
    if sys.version_info[0] >= 3:
        stdout = stdout.decode()
    if process.returncode != 0:
        if verbose:
            print("unable to run %s (error)" % display)
            print("stdout was %s" % stdout)
        return None, process.returncode
    return stdout, process.returncode
20,050 | import errno
import os
import re
import subprocess
import sys
def get_keywords():
    """Return the git-archive keyword strings used for version lookup."""
    # These placeholder strings are substituted by git during `git archive`.
    # setup.py/versioneer.py greps for these exact variable names, so each
    # assignment must stay on its own line; _version.py just calls
    # get_keywords().
    git_refnames = "$Format:%d$"
    git_full = "$Format:%H$"
    git_date = "$Format:%ci$"
    return {
        "refnames": git_refnames,
        "full": git_full,
        "date": git_date,
    }
def get_config():
    """Create, populate and return the VersioneerConfig() object."""
    # These values are baked in when 'setup.py versioneer' generates
    # _version.py; they describe how this project tags and lays out its
    # version information.
    config = VersioneerConfig()
    config.VCS = "git"
    config.style = "pep440-pre"
    config.tag_prefix = ""
    config.parentdir_prefix = "ffsubsync-"
    config.versionfile_source = "ffsubsync/_version.py"
    config.verbose = False
    return config
class NotThisMethod(Exception):
    """Exception raised if a method is not valid for the current scenario.

    Versioneer tries several discovery strategies in turn (expanded
    git-archive keywords, ``git describe``, parent-directory name); each
    strategy raises this exception to signal "try the next one".
    """
def versions_from_parentdir(parentdir_prefix, root, verbose):
    """Try to determine the version from the parent directory name.

    Source tarballs conventionally unpack into a directory that includes
    both the project name and a version string.  We also support searching
    up two directory levels for an appropriately named parent directory.

    Args:
        parentdir_prefix: directory-name prefix preceding the version string.
        root: path at which to start the upward search.
        verbose: when true, print the directories that were examined.

    Returns:
        A versioneer "versions" dict on success.

    Raises:
        NotThisMethod: when no directory within two levels above ``root``
            starts with ``parentdir_prefix``.
    """
    rootdirs = []

    # Examine `root` itself plus up to two parent directories.
    for _ in range(3):
        dirname = os.path.basename(root)
        if dirname.startswith(parentdir_prefix):
            return {"version": dirname[len(parentdir_prefix):],
                    "full-revisionid": None,
                    "dirty": False, "error": None, "date": None}
        rootdirs.append(root)
        root = os.path.dirname(root)  # up a level

    if verbose:
        print("Tried directories %s but none started with prefix %s" %
              (str(rootdirs), parentdir_prefix))
    raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
    """Get version information from git keywords."""
    if not keywords:
        raise NotThisMethod("no keywords at all, weird")
    date = keywords.get("date")
    if date is not None:
        # git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
        # datestamp. However we prefer "%ci" (which expands to an "ISO-8601
        # -like" string, which we must then edit to make compliant), because
        # it's been around since git-1.5.3, and it's too difficult to
        # discover which version we're using, or to work around using an
        # older one.
        date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
    refnames = keywords["refnames"].strip()
    if refnames.startswith("$Format"):
        if verbose:
            print("keywords are unexpanded, not using")
        raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
    refs = {name.strip() for name in refnames.strip("()").split(",")}
    # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
    # just "foo-1.0". If we see a "tag: " prefix, prefer those.
    TAG = "tag: "
    candidates = {name[len(TAG):] for name in refs if name.startswith(TAG)}
    if not candidates:
        # Either we're using git < 1.8.3, or there really are no tags. We use
        # a heuristic: assume all version tags have a digit. The old git %d
        # expansion behaves like git log --decorate=short and strips out the
        # refs/heads/ and refs/tags/ prefixes that would let us distinguish
        # between branches and tags. By ignoring refnames without digits, we
        # filter out many common branch names like "release" and
        # "stabilization", as well as "HEAD" and "master".
        candidates = {name for name in refs if re.search(r'\d', name)}
        if verbose:
            print("discarding '%s', no digits" % ",".join(refs - candidates))
    if verbose:
        print("likely tags: %s" % ",".join(sorted(candidates)))
    for candidate in sorted(candidates):
        # sorting will prefer e.g. "2.0" over "2.0rc1"
        if candidate.startswith(tag_prefix):
            version = candidate[len(tag_prefix):]
            if verbose:
                print("picking %s" % version)
            return {"version": version,
                    "full-revisionid": keywords["full"].strip(),
                    "dirty": False, "error": None,
                    "date": date}
    # no suitable tags, so version is "0+unknown", but full hex is still there
    if verbose:
        print("no suitable tags, using unknown + full revision id")
    return {"version": "0+unknown",
            "full-revisionid": keywords["full"].strip(),
            "dirty": False, "error": "no suitable tags", "date": None}
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
    """Get version from 'git describe' in the root of the source tree.

    This only gets called if the git-archive 'subst' keywords were *not*
    expanded, and _version.py hasn't already been rewritten with a short
    version string, meaning we're inside a checked out source tree.

    The ``run_command`` default binds the module-level helper; tests can
    inject a fake runner.  Returns a "pieces" dict with keys such as
    "long", "short", "dirty", "closest-tag", "distance", "date", "error";
    raises NotThisMethod when ``root`` is not under git control.
    """
    GITS = ["git"]
    # On Windows, git may be installed as git.cmd or git.exe (shell=False).
    if sys.platform == "win32":
        GITS = ["git.cmd", "git.exe"]
    out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
                          hide_stderr=True)
    if rc != 0:
        if verbose:
            print("Directory %s not under git control" % root)
        raise NotThisMethod("'git rev-parse --git-dir' returned error")
    # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
    # if there isn't one, this yields HEX[-dirty] (no NUM)
    describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
                                          "--always", "--long",
                                          "--match", "%s*" % tag_prefix],
                                   cwd=root)
    # --long was added in git-1.5.5
    if describe_out is None:
        raise NotThisMethod("'git describe' failed")
    describe_out = describe_out.strip()
    full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
    if full_out is None:
        raise NotThisMethod("'git rev-parse' failed")
    full_out = full_out.strip()
    pieces = {}
    pieces["long"] = full_out
    pieces["short"] = full_out[:7]  # maybe improved later
    pieces["error"] = None
    # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
    # TAG might have hyphens.
    git_describe = describe_out
    # look for -dirty suffix
    dirty = git_describe.endswith("-dirty")
    pieces["dirty"] = dirty
    if dirty:
        git_describe = git_describe[:git_describe.rindex("-dirty")]
    # now we have TAG-NUM-gHEX or HEX
    if "-" in git_describe:
        # TAG-NUM-gHEX
        mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
        if not mo:
            # unparseable. Maybe git-describe is misbehaving?
            pieces["error"] = ("unable to parse git-describe output: '%s'"
                               % describe_out)
            return pieces
        # tag
        full_tag = mo.group(1)
        if not full_tag.startswith(tag_prefix):
            if verbose:
                fmt = "tag '%s' doesn't start with prefix '%s'"
                print(fmt % (full_tag, tag_prefix))
            pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
                               % (full_tag, tag_prefix))
            return pieces
        pieces["closest-tag"] = full_tag[len(tag_prefix):]
        # distance: number of commits since tag
        pieces["distance"] = int(mo.group(2))
        # commit: short hex revision ID
        pieces["short"] = mo.group(3)
    else:
        # HEX: no tags
        pieces["closest-tag"] = None
        count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
                                    cwd=root)
        pieces["distance"] = int(count_out)  # total number of commits
    # commit date: see ISO-8601 comment in git_versions_from_keywords()
    date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"],
                       cwd=root)[0].strip()
    pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
    return pieces
def render(pieces, style):
    """Render the given version pieces into the requested style."""
    if pieces["error"]:
        # A broken pieces dict short-circuits rendering entirely.
        return {"version": "unknown",
                "full-revisionid": pieces.get("long"),
                "dirty": None,
                "error": pieces["error"],
                "date": None}

    if not style or style == "default":
        style = "pep440"  # the default

    # Lazy dispatch table: the renderer is only looked up and invoked for
    # the style that was actually requested.
    renderers = {
        "pep440": lambda: render_pep440(pieces),
        "pep440-pre": lambda: render_pep440_pre(pieces),
        "pep440-post": lambda: render_pep440_post(pieces),
        "pep440-old": lambda: render_pep440_old(pieces),
        "git-describe": lambda: render_git_describe(pieces),
        "git-describe-long": lambda: render_git_describe_long(pieces),
    }
    if style not in renderers:
        raise ValueError("unknown style '%s'" % style)
    rendered = renderers[style]()

    return {"version": rendered, "full-revisionid": pieces["long"],
            "dirty": pieces["dirty"], "error": None,
            "date": pieces.get("date")}
The provided code snippet includes necessary dependencies for implementing the `get_versions` function. Write a Python function `def get_versions()` to solve the following problem:
Get version information or return default if unable to do so.
Here is the function:
def get_versions():
    """Get version information or return default if unable to do so."""
    # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE.  If we
    # have __file__ we can work backwards from there to the root; some
    # py2exe/bbfreeze/non-CPython implementations don't provide __file__,
    # in which case only the expanded keywords can be used.
    cfg = get_config()
    verbose = cfg.verbose

    # Strategy 1: expanded git-archive keywords.
    try:
        return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
                                          verbose)
    except NotThisMethod:
        pass

    # Locate the project root by walking up from this file.
    try:
        root = os.path.realpath(__file__)
        # versionfile_source is the relative path from the top of the source
        # tree (where the .git directory might live) to this file. Invert
        # this to find the root from __file__.
        for _ in cfg.versionfile_source.split('/'):
            root = os.path.dirname(root)
    except NameError:
        return {"version": "0+unknown", "full-revisionid": None,
                "dirty": None,
                "error": "unable to find root of source tree",
                "date": None}

    # Strategy 2: ask git directly.
    try:
        return render(git_pieces_from_vcs(cfg.tag_prefix, root, verbose),
                      cfg.style)
    except NotThisMethod:
        pass

    # Strategy 3: infer the version from the parent directory name.
    try:
        if cfg.parentdir_prefix:
            return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
    except NotThisMethod:
        pass

    return {"version": "0+unknown", "full-revisionid": None,
            "dirty": None,
            "error": "unable to compute version", "date": None}
20,051 | import logging
import math
logger = logging.getLogger(__name__)
invphi = (math.sqrt(5) - 1) / 2
invphi2 = (3 - math.sqrt(5)) / 2
The provided code snippet includes necessary dependencies for implementing the `gss` function. Write a Python function `def gss(f, a, b, tol=1e-4)` to solve the following problem:
Golden-section search. Given a function f with a single local minimum in the interval [a,b], gss returns a subset interval [c,d] that contains the minimum with d-c <= tol. Example: >>> f = lambda x: (x-2)**2 >>> a = 1 >>> b = 5 >>> tol = 1e-5 >>> (c,d) = gss(f, a, b, tol) >>> print(c, d) 1.9999959837979107 2.0000050911830893
Here is the function:
def gss(f, a, b, tol=1e-4):
    """Golden-section search.

    Given a function f with a single local minimum in
    the interval [a,b], gss returns a subset interval
    [c,d] that contains the minimum with d-c <= tol.

    The objective ``f`` may optionally accept a second boolean argument
    (whether this is the final iteration); single-argument objectives are
    supported via the ``f_wrapped`` shim below.

    Example:
    >>> f = lambda x: (x-2)**2
    >>> a = 1
    >>> b = 5
    >>> tol = 1e-5
    >>> (c,d) = gss(f, a, b, tol)
    >>> print(c, d)
    1.9999959837979107 2.0000050911830893
    """
    (a, b) = (min(a, b), max(a, b))
    h = b - a
    if h <= tol:
        return a, b
    # Required steps to achieve tolerance
    n = int(math.ceil(math.log(tol / h) / math.log(invphi)))
    logger.info(
        "About to perform %d iterations of golden section search to find the best framerate",
        n,
    )

    def f_wrapped(x, is_last_iter):
        # Allow objectives that take only a single argument.
        try:
            return f(x, is_last_iter)
        except TypeError:
            return f(x)

    c = a + invphi2 * h
    d = a + invphi * h
    yc = f_wrapped(c, n == 1)
    yd = f_wrapped(d, n == 1)
    for k in range(n - 1):
        if yc < yd:
            b = d
            d = c
            yd = yc
            h = invphi * h
            c = a + invphi2 * h
            yc = f_wrapped(c, k == n - 2)
        else:
            a = c
            c = d
            yc = yd
            h = invphi * h
            d = a + invphi * h
            # BUG FIX: this previously called `f` directly, bypassing the
            # f_wrapped shim -- a single-argument objective would raise
            # TypeError on this branch only.
            yd = f_wrapped(d, k == n - 2)
    if yc < yd:
        return a, d
    else:
        return c, b
20,052 | from collections import defaultdict
from itertools import islice
from typing import Any, Callable, Optional
from typing_extensions import Protocol
class Pipeline:
    """Sequentially apply a list of transforms and a final estimator.

    Minimal stand-in for ``sklearn.pipeline.Pipeline``.  ``steps`` is a
    list of ``(name, estimator)`` tuples: every step but the last must
    implement ``fit``/``transform`` (or ``fit_transform``); the last step
    only needs ``fit``.  ``None`` or the string ``"passthrough"`` may be
    used as an identity step.
    """

    def __init__(self, steps, verbose=False):
        self.steps = steps
        self.verbose = verbose
        self._validate_steps()

    def _validate_steps(self):
        """Raise TypeError when a step violates the contract described above."""
        names, estimators = zip(*self.steps)

        # validate estimators
        transformers = estimators[:-1]
        estimator = estimators[-1]

        for t in transformers:
            if t is None or t == "passthrough":
                continue
            if not (hasattr(t, "fit") or hasattr(t, "fit_transform")) or not hasattr(
                t, "transform"
            ):
                raise TypeError(
                    "All intermediate steps should be "
                    "transformers and implement fit and transform "
                    "or be the string 'passthrough' "
                    "'%s' (type %s) doesn't" % (t, type(t))
                )

        # We allow last estimator to be None as an identity transformation
        if (
            estimator is not None
            and estimator != "passthrough"
            and not hasattr(estimator, "fit")
        ):
            raise TypeError(
                "Last step of Pipeline should implement fit "
                "or be the string 'passthrough'. "
                "'%s' (type %s) doesn't" % (estimator, type(estimator))
            )

    def _iter(self, with_final=True, filter_passthrough=True):
        """
        Generate (idx, (name, trans)) tuples from self.steps

        When filter_passthrough is True, 'passthrough' and None transformers
        are filtered out.
        """
        stop = len(self.steps)
        if not with_final:
            stop -= 1

        for idx, (name, trans) in enumerate(islice(self.steps, 0, stop)):
            if not filter_passthrough:
                yield idx, name, trans
            elif trans is not None and trans != "passthrough":
                yield idx, name, trans

    def __len__(self) -> int:
        """
        Returns the length of the Pipeline
        """
        return len(self.steps)

    def __getitem__(self, ind):
        """Returns a sub-pipeline or a single estimator in the pipeline

        Indexing with an integer will return an estimator; using a slice
        returns another Pipeline instance which copies a slice of this
        Pipeline. This copy is shallow: modifying (or fitting) estimators in
        the sub-pipeline will affect the larger pipeline and vice-versa.
        However, replacing a value in `step` will not affect a copy.
        """
        if isinstance(ind, slice):
            if ind.step not in (1, None):
                raise ValueError("Pipeline slicing only supports a step of 1")
            return self.__class__(self.steps[ind])
        try:
            name, est = self.steps[ind]
        except TypeError:
            # Not an int, try get step by name
            return self.named_steps[ind]
        return est

    # NOTE: the accessors below are @property (as in sklearn).  They were
    # previously plain methods, which broke e.g. the
    # `self._final_estimator == "passthrough"` comparisons in this class
    # (a bound method never equals a string).

    @property
    def _estimator_type(self):
        # Mirror the estimator type of the final step.
        return self.steps[-1][1]._estimator_type

    @property
    def named_steps(self):
        # Mapping of step name -> estimator (a fresh dict on each access).
        return dict(self.steps)

    @property
    def _final_estimator(self):
        # The last step, with None normalized to the "passthrough" sentinel.
        estimator = self.steps[-1][1]
        return "passthrough" if estimator is None else estimator

    def _log_message(self, step_idx):
        # Progress message for verbose mode; None when not verbose.
        if not self.verbose:
            return None
        name, step = self.steps[step_idx]

        return "(step %d of %d) Processing %s" % (step_idx + 1, len(self.steps), name)

    # Estimator interface

    def _fit(self, X, y=None, **fit_params):
        # shallow copy of steps - this should really be steps_
        self.steps = list(self.steps)
        self._validate_steps()

        fit_params_steps = {name: {} for name, step in self.steps if step is not None}
        for pname, pval in fit_params.items():
            if "__" not in pname:
                raise ValueError(
                    "Pipeline.fit does not accept the {} parameter. "
                    "You can pass parameters to specific steps of your "
                    "pipeline using the stepname__parameter format, e.g. "
                    "`Pipeline.fit(X, y, logisticregression__sample_weight"
                    "=sample_weight)`.".format(pname)
                )
            step, param = pname.split("__", 1)
            fit_params_steps[step][param] = pval
        for step_idx, name, transformer in self._iter(
            with_final=False, filter_passthrough=False
        ):
            if transformer is None or transformer == "passthrough":
                continue

            # Fit or load from cache the current transformer
            X, fitted_transformer = _fit_transform_one(
                transformer, X, y, None, **fit_params_steps[name]
            )
            # Replace the transformer of the step with the fitted
            # transformer. This is necessary when loading the transformer
            # from the cache.
            self.steps[step_idx] = (name, fitted_transformer)
        if self._final_estimator == "passthrough":
            return X, {}
        return X, fit_params_steps[self.steps[-1][0]]

    def fit(self, X, y=None, **fit_params):
        """Fit the model

        Fit all the transforms one after the other and transform the
        data, then fit the transformed data using the final estimator.

        Parameters
        ----------
        X : iterable
            Training data. Must fulfill input requirements of first step of the
            pipeline.
        y : iterable, default=None
            Training targets. Must fulfill label requirements for all steps of
            the pipeline.
        **fit_params : dict of string -> object
            Parameters passed to the ``fit`` method of each step, where
            each parameter name is prefixed such that parameter ``p`` for step
            ``s`` has key ``s__p``.

        Returns
        -------
        self : Pipeline
            This estimator
        """
        Xt, fit_params = self._fit(X, y, **fit_params)
        if self._final_estimator != "passthrough":
            self._final_estimator.fit(Xt, y, **fit_params)
        return self

    def fit_transform(self, X, y=None, **fit_params):
        """Fit the model and transform with the final estimator

        Fits all the transforms one after the other and transforms the
        data, then uses fit_transform on transformed data with the final
        estimator.

        Parameters
        ----------
        X : iterable
            Training data. Must fulfill input requirements of first step of the
            pipeline.
        y : iterable, default=None
            Training targets. Must fulfill label requirements for all steps of
            the pipeline.
        **fit_params : dict of string -> object
            Parameters passed to the ``fit`` method of each step, where
            each parameter name is prefixed such that parameter ``p`` for step
            ``s`` has key ``s__p``.

        Returns
        -------
        Xt : array-like of shape (n_samples, n_transformed_features)
            Transformed samples
        """
        last_step = self._final_estimator
        Xt, fit_params = self._fit(X, y, **fit_params)
        if last_step == "passthrough":
            return Xt
        if hasattr(last_step, "fit_transform"):
            return last_step.fit_transform(Xt, y, **fit_params)
        else:
            return last_step.fit(Xt, y, **fit_params).transform(Xt)

    @property
    def transform(self):
        """Apply transforms, and transform with the final estimator

        This also works where final estimator is ``None``: all prior
        transformations are applied.

        Parameters
        ----------
        X : iterable
            Data to transform. Must fulfill input requirements of first step
            of the pipeline.

        Returns
        -------
        Xt : array-like of shape (n_samples, n_transformed_features)
        """
        # _final_estimator is None or has transform, otherwise attribute error
        # XXX: Handling the None case means we can't use if_delegate_has_method
        if self._final_estimator != "passthrough":
            self._final_estimator.transform
        return self._transform

    def _transform(self, X):
        # Run X through every (non-passthrough) step's transform in order.
        Xt = X
        for _, _, transform in self._iter():
            Xt = transform.transform(Xt)
        return Xt

    @property
    def classes_(self):
        # Delegate to the final estimator's fitted classes.
        return self.steps[-1][-1].classes_

    @property
    def _pairwise(self):
        # check if first estimator expects pairwise input
        return getattr(self.steps[0][1], "_pairwise", False)

    @property
    def n_features_in_(self):
        # delegate to first step (which will call _check_is_fitted)
        return self.steps[0][1].n_features_in_
def _name_estimators(estimators):
"""Generate names for estimators."""
names = [
estimator if isinstance(estimator, str) else type(estimator).__name__.lower()
for estimator in estimators
]
namecount = defaultdict(int)
for est, name in zip(estimators, names):
namecount[name] += 1
for k, v in list(namecount.items()):
if v == 1:
del namecount[k]
for i in reversed(range(len(estimators))):
name = names[i]
if name in namecount:
names[i] += "-%d" % namecount[name]
namecount[name] -= 1
return list(zip(names, estimators))
The provided code snippet includes necessary dependencies for implementing the `make_pipeline` function. Write a Python function `def make_pipeline(*steps, **kwargs) -> Pipeline` to solve the following problem:
Construct a Pipeline from the given estimators. This is a shorthand for the Pipeline constructor; it does not require, and does not permit, naming the estimators. Instead, their names will be set to the lowercase of their types automatically. Parameters ---------- *steps : list of estimators. verbose : bool, default=False If True, the time elapsed while fitting each step will be printed as it is completed. Returns ------- p : Pipeline
Here is the function:
def make_pipeline(*steps, **kwargs) -> Pipeline:
    """Construct a Pipeline from the given estimators.

    Shorthand for the Pipeline constructor: step names are not given by the
    caller but derived automatically from each estimator's lowercased type.

    Parameters
    ----------
    *steps : list of estimators.
    verbose : bool, default=False
        If True, the time elapsed while fitting each step will be printed as it
        is completed.

    Returns
    -------
    p : Pipeline
    """
    verbose = kwargs.pop("verbose", False)
    if kwargs:
        # Anything left after popping `verbose` is unsupported.
        raise TypeError(
            'Unknown keyword arguments: "{}"'.format(next(iter(kwargs)))
        )
    return Pipeline(_name_estimators(steps), verbose=verbose)
20,053 | from collections import defaultdict
from itertools import islice
from typing import Any, Callable, Optional
from typing_extensions import Protocol
def _transform_one(transformer, X, y, weight, **fit_params):
res = transformer.transform(X)
# if we have a weight for this transformer, multiply output
if weight is None:
return res
return res * weight | null |
20,054 | from collections import defaultdict
from itertools import islice
from typing import Any, Callable, Optional
from typing_extensions import Protocol
The provided code snippet includes necessary dependencies for implementing the `_fit_transform_one` function. Write a Python function `def _fit_transform_one(transformer, X, y, weight, **fit_params)` to solve the following problem:
Fits ``transformer`` to ``X`` and ``y``. The transformed result is returned with the fitted transformer. If ``weight`` is not ``None``, the result will be multiplied by ``weight``.
Here is the function:
def _fit_transform_one(transformer, X, y, weight, **fit_params):
"""
Fits ``transformer`` to ``X`` and ``y``. The transformed result is returned
with the fitted transformer. If ``weight`` is not ``None``, the result will
be multiplied by ``weight``.
"""
if hasattr(transformer, "fit_transform"):
res = transformer.fit_transform(X, y, **fit_params)
else:
res = transformer.fit(X, y, **fit_params).transform(X)
if weight is None:
return res, transformer
return res * weight, transformer | Fits ``transformer`` to ``X`` and ``y``. The transformed result is returned with the fitted transformer. If ``weight`` is not ``None``, the result will be multiplied by ``weight``. |
import copy
from datetime import timedelta
import logging
from typing import Any, cast, List, Optional

import pysubs2
import srt

from ffsubsync.constants import (
    DEFAULT_ENCODING,
    DEFAULT_MAX_SUBTITLE_SECONDS,
    DEFAULT_START_SECONDS,
)
from ffsubsync.file_utils import open_file
from ffsubsync.generic_subtitles import GenericSubtitle, GenericSubtitlesFile, SubsMixin
from ffsubsync.sklearn_shim import TransformerMixin
logger: logging.Logger = logging.getLogger(__name__)
class GenericSubtitle:
    """Format-agnostic wrapper around a single subtitle event.

    ``start`` and ``end`` are ``datetime.timedelta`` offsets and ``inner``
    is the wrapped backend event (an ``srt.Subtitle`` or a
    ``pysubs2.SSAEvent``).
    """

    def __init__(self, start, end, inner):
        self.start = start
        self.end = end
        self.inner = inner

    def __eq__(self, other: object) -> bool:
        if not isinstance(other, GenericSubtitle):
            return False
        return (
            self.start == other.start
            and self.end == other.end
            and self.inner == other.inner
        )

    def content(self) -> str:
        """Return the textual content of the wrapped event.

        Raises NotImplementedError for unsupported inner types.
        NOTE(review): upstream exposes this as a @property — confirm call
        sites before changing the access style.
        """
        if isinstance(self.inner, srt.Subtitle):
            ret = self.inner.content
        elif isinstance(self.inner, pysubs2.SSAEvent):
            ret = self.inner.text
        else:
            raise NotImplementedError(
                "unsupported subtitle type: %s" % type(self.inner)
            )
        return ret

    def resolve_inner_timestamps(self):
        """Return a deep copy of ``inner`` with this wrapper's start/end applied."""
        ret = copy.deepcopy(self.inner)
        if isinstance(self.inner, srt.Subtitle):
            ret.start = self.start
            ret.end = self.end
        elif isinstance(self.inner, pysubs2.SSAEvent):
            # pysubs2 stores times as integer milliseconds.
            ret.start = pysubs2.make_time(s=self.start.total_seconds())
            ret.end = pysubs2.make_time(s=self.end.total_seconds())
        else:
            raise NotImplementedError(
                "unsupported subtitle type: %s" % type(self.inner)
            )
        return ret

    def merge_with(self, other):
        """Concatenate ``other``'s content onto this event (srt only).

        Keeps this event's start/end; raises NotImplementedError for
        non-srt inner types.
        """
        assert isinstance(self.inner, type(other.inner))
        inner_merged = copy.deepcopy(self.inner)
        if isinstance(self.inner, srt.Subtitle):
            inner_merged.content = "{}\n{}".format(
                inner_merged.content, other.inner.content
            )
            return self.__class__(self.start, self.end, inner_merged)
        else:
            raise NotImplementedError(
                "unsupported subtitle type: %s" % type(self.inner)
            )

    # BUG FIX: restored @classmethod -- without it, calling
    # GenericSubtitle.wrap_inner_subtitle(sub) on the class passes the
    # event itself as `cls` and the constructor call below breaks.
    @classmethod
    def wrap_inner_subtitle(cls, sub) -> "GenericSubtitle":
        """Wrap a backend subtitle event in a GenericSubtitle."""
        if isinstance(sub, srt.Subtitle):
            return cls(sub.start, sub.end, sub)
        elif isinstance(sub, pysubs2.SSAEvent):
            return cls(
                timedelta(milliseconds=sub.start), timedelta(milliseconds=sub.end), sub
            )
        else:
            raise NotImplementedError("unsupported subtitle type: %s" % type(sub))
def _preprocess_subs(
    subs,
    max_subtitle_seconds: Optional[int] = None,
    start_seconds: int = 0,
    tolerant: bool = True,
) -> List[GenericSubtitle]:
    """Wrap raw subtitle events as GenericSubtitle objects.

    Events that start before ``start_seconds`` are dropped, and each
    event's end is clamped so its duration never exceeds
    ``max_subtitle_seconds`` (when given).  With ``tolerant`` set, events
    that raise ValueError are logged and skipped instead of aborting.
    """
    subs_list = []
    start_time = timedelta(seconds=start_seconds)
    # Effectively "no cap" unless the caller supplied a max duration.
    max_duration = timedelta(days=1)
    if max_subtitle_seconds is not None:
        max_duration = timedelta(seconds=max_subtitle_seconds)
    subs = iter(subs)
    # Manual next() loop (rather than a for-loop) so that a ValueError
    # raised while *producing* the next event can also be caught and
    # tolerated below.
    while True:
        try:
            next_sub = GenericSubtitle.wrap_inner_subtitle(next(subs))
            if next_sub.start < start_time:
                continue
            next_sub.end = min(next_sub.end, next_sub.start + max_duration)
            subs_list.append(next_sub)
        # We don't catch SRTParseError here b/c that is typically raised when we
        # are trying to parse with the wrong encoding, in which case we might
        # be able to try another one on the *entire* set of subtitles elsewhere.
        except ValueError as e:
            if tolerant:
                logger.warning(e)
                continue
            else:
                raise
        except StopIteration:
            break
    return subs_list
20,056 | import logging
import os
import platform
import subprocess
from ffsubsync.constants import SUBSYNC_RESOURCES_ENV_MAGIC
def subprocess_args(include_stdout=True):
    """Build keyword arguments for subprocess calls that behave correctly
    when the app is frozen (PyInstaller ``--noconsole``) on Windows.

    Returns a dict suitable for ``subprocess.Popen(**kwargs)``; stdout is
    only piped when ``include_stdout`` is true, since ``check_output``
    forbids an explicit ``stdout`` argument.
    """
    # Only Windows exposes STARTUPINFO.  There, subprocess calls pop up a
    # command window by default when run from a Pyinstaller ``--noconsole``
    # build; suppress that, and pass an environment so the PATH is searched.
    if hasattr(subprocess, "STARTUPINFO"):
        startupinfo = subprocess.STARTUPINFO()
        startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
        env = os.environ
    else:
        startupinfo = None
        env = None

    # ``subprocess.check_output`` raises
    # ``ValueError: stdout argument not allowed, it will be overridden.``
    # when given stdout, so only include it on request.
    kwargs = {"stdout": subprocess.PIPE} if include_stdout else {}

    # On Windows, running from a ``--noconsole`` Pyinstaller binary requires
    # redirecting everything (stdin, stdout, stderr) to avoid
    # "[Error 6] the handle is invalid."
    kwargs.update(
        {
            "stdin": subprocess.PIPE,
            "stderr": subprocess.PIPE,
            "startupinfo": startupinfo,
            "env": env,
        }
    )
    return kwargs
20,057 | import argparse
from datetime import datetime
import logging
import os
import shutil
import subprocess
import sys
from typing import cast, Any, Callable, Dict, List, Optional, Tuple, Union
import numpy as np
from ffsubsync.aligners import FFTAligner, MaxScoreAligner
from ffsubsync.constants import (
DEFAULT_APPLY_OFFSET_SECONDS,
DEFAULT_FRAME_RATE,
DEFAULT_MAX_OFFSET_SECONDS,
DEFAULT_MAX_SUBTITLE_SECONDS,
DEFAULT_NON_SPEECH_LABEL,
DEFAULT_START_SECONDS,
DEFAULT_VAD,
DEFAULT_ENCODING,
FRAMERATE_RATIOS,
SAMPLE_RATE,
SUBTITLE_EXTENSIONS,
)
from ffsubsync.ffmpeg_utils import ffmpeg_bin_path
from ffsubsync.sklearn_shim import Pipeline, TransformerMixin
from ffsubsync.speech_transformers import (
VideoSpeechTransformer,
DeserializeSpeechTransformer,
make_subtitle_speech_pipeline,
)
from ffsubsync.subtitle_parser import make_subtitle_parser
from ffsubsync.subtitle_transformers import SubtitleMerger, SubtitleShifter
from ffsubsync.version import get_version
logger: logging.Logger = logging.getLogger(__name__)
def make_test_case(
args: argparse.Namespace, npy_savename: Optional[str], sync_was_successful: bool
) -> int:
def _setup_logging(
args: argparse.Namespace,
) -> Tuple[Optional[str], Optional[logging.FileHandler]]:
def _npy_savename(args: argparse.Namespace) -> str:
def _run_impl(args: argparse.Namespace, result: Dict[str, Any]) -> bool:
def validate_and_transform_args(
parser_or_args: Union[argparse.ArgumentParser, argparse.Namespace]
) -> Optional[argparse.Namespace]:
def run(
    parser_or_args: Union[argparse.ArgumentParser, argparse.Namespace]
) -> Dict[str, Any]:
    """Validate arguments, run the sync, and return a result dict.

    The returned dict always carries ``retval`` (0 on success; set to 1 on
    invalid arguments and incremented by test-case creation failures),
    plus ``offset_seconds`` and ``framerate_scale_factor``;
    ``sync_was_successful`` is added only when a sync was attempted.
    """
    sync_was_successful = False
    result = {
        "retval": 0,
        "offset_seconds": None,
        "framerate_scale_factor": None,
    }
    # Bail out early when argument validation fails (returns None).
    args = validate_and_transform_args(parser_or_args)
    if args is None:
        result["retval"] = 1
        return result
    log_path, log_handler = _setup_logging(args)
    try:
        sync_was_successful = _run_impl(args, result)
        result["sync_was_successful"] = sync_was_successful
        return result
    finally:
        # Runs after the `return` above: detach the log handler first so
        # the log file can be archived/removed safely.
        if log_handler is not None and log_path is not None:
            log_handler.close()
            logger.removeHandler(log_handler)
            if args.make_test_case:
                # Fold any test-case-creation error code into retval.
                result["retval"] += make_test_case(
                    args, _npy_savename(args), sync_was_successful
                )
            if args.log_dir_path is None or not os.path.isdir(args.log_dir_path):
                # Presumably the log file is temporary unless the user gave
                # a real log directory -- TODO confirm.
                os.remove(log_path)
20,058 | import argparse
from datetime import datetime
import logging
import os
import shutil
import subprocess
import sys
from typing import cast, Any, Callable, Dict, List, Optional, Tuple, Union
import numpy as np
from ffsubsync.aligners import FFTAligner, MaxScoreAligner
from ffsubsync.constants import (
DEFAULT_APPLY_OFFSET_SECONDS,
DEFAULT_FRAME_RATE,
DEFAULT_MAX_OFFSET_SECONDS,
DEFAULT_MAX_SUBTITLE_SECONDS,
DEFAULT_NON_SPEECH_LABEL,
DEFAULT_START_SECONDS,
DEFAULT_VAD,
DEFAULT_ENCODING,
FRAMERATE_RATIOS,
SAMPLE_RATE,
SUBTITLE_EXTENSIONS,
)
from ffsubsync.ffmpeg_utils import ffmpeg_bin_path
from ffsubsync.sklearn_shim import Pipeline, TransformerMixin
from ffsubsync.speech_transformers import (
VideoSpeechTransformer,
DeserializeSpeechTransformer,
make_subtitle_speech_pipeline,
)
from ffsubsync.subtitle_parser import make_subtitle_parser
from ffsubsync.subtitle_transformers import SubtitleMerger, SubtitleShifter
from ffsubsync.version import get_version
def add_main_args_for_cli(parser: argparse.ArgumentParser) -> None:
    """Register the primary (shared) CLI arguments on ``parser``."""
    # Positional reference input; optional so `--version` etc. still work.
    parser.add_argument(
        "reference",
        nargs="?",
        help="Reference (video, subtitles, or a numpy array with VAD speech) to which to synchronize input subtitles.",
    )
    # Input/output subtitle paths.
    parser.add_argument(
        "-i", "--srtin", nargs="*", help="Input subtitles file (default=stdin)."
    )
    parser.add_argument(
        "-o", "--srtout", help="Output subtitles file (default=stdout)."
    )
    # Behavior flags.
    parser.add_argument(
        "--merge-with-reference",
        "--merge",
        action="store_true",
        help="Merge reference subtitles with synced output subtitles.",
    )
    parser.add_argument(
        "--make-test-case",
        "--create-test-case",
        action="store_true",
        help=(
            "If specified, serialize reference speech to a numpy array, "
            "and create an archive with input/output subtitles "
            "and serialized speech."
        ),
    )
    # Stream selection within the reference container.
    parser.add_argument(
        "--reference-stream",
        "--refstream",
        "--reference-track",
        "--reftrack",
        default=None,
        help=(
            "Which stream/track in the video file to use as reference, "
            "formatted according to ffmpeg conventions. For example, 0:s:0 "
            "uses the first subtitle track; 0:a:3 would use the third audio track. "
            "You can also drop the leading `0:`; i.e. use s:0 or a:3, respectively. "
            "Example: `ffs ref.mkv -i in.srt -o out.srt --reference-stream s:2`"
        ),
    )
def add_cli_only_args(parser: argparse.ArgumentParser) -> None:
    """Register options that are only exposed on the command line (not the GUI).

    Args:
        parser: the argument parser to populate (mutated in place).
    """
    parser.add_argument(
        "-v",
        "--version",
        action="version",
        version="{package} {version}".format(
            package=__package__, version=get_version()
        ),
    )
    parser.add_argument(
        "--overwrite-input",
        action="store_true",
        help=(
            "If specified, will overwrite the input srt "
            "instead of writing the output to a new file."
        ),
    )
    parser.add_argument(
        "--encoding",
        default=DEFAULT_ENCODING,
        help="What encoding to use for reading input subtitles "
        "(default=%s)." % DEFAULT_ENCODING,
    )
    parser.add_argument(
        "--max-subtitle-seconds",
        type=float,
        default=DEFAULT_MAX_SUBTITLE_SECONDS,
        help="Maximum duration for a subtitle to appear on-screen "
        "(default=%.3f seconds)." % DEFAULT_MAX_SUBTITLE_SECONDS,
    )
    parser.add_argument(
        "--start-seconds",
        type=int,
        default=DEFAULT_START_SECONDS,
        help="Start time for processing "
        "(default=%d seconds)." % DEFAULT_START_SECONDS,
    )
    parser.add_argument(
        "--max-offset-seconds",
        type=float,
        default=DEFAULT_MAX_OFFSET_SECONDS,
        help="The max allowed offset seconds for any subtitle segment "
        "(default=%d seconds)." % DEFAULT_MAX_OFFSET_SECONDS,
    )
    parser.add_argument(
        "--apply-offset-seconds",
        type=float,
        default=DEFAULT_APPLY_OFFSET_SECONDS,
        help="Apply a predefined offset in seconds to all subtitle segments "
        "(default=%d seconds)." % DEFAULT_APPLY_OFFSET_SECONDS,
    )
    parser.add_argument(
        "--frame-rate",
        type=int,
        default=DEFAULT_FRAME_RATE,
        help="Frame rate for audio extraction (default=%d)." % DEFAULT_FRAME_RATE,
    )
    parser.add_argument(
        "--skip-infer-framerate-ratio",
        action="store_true",
        help="If set, do not try to infer framerate ratio based on duration ratio.",
    )
    parser.add_argument(
        "--non-speech-label",
        type=float,
        default=DEFAULT_NON_SPEECH_LABEL,
        help="Label to use for frames detected as non-speech (default=%f)"
        % DEFAULT_NON_SPEECH_LABEL,
    )
    parser.add_argument(
        "--output-encoding",
        default="utf-8",
        help="What encoding to use for writing output subtitles "
        '(default=utf-8). Can indicate "same" to use same '
        "encoding as that of the input.",
    )
    parser.add_argument(
        "--reference-encoding",
        help="What encoding to use for reading / writing reference subtitles "
        "(if applicable, default=infer).",
    )
    # Voice-activity-detector backend used when the reference is audio/video.
    parser.add_argument(
        "--vad",
        choices=[
            "subs_then_webrtc",
            "webrtc",
            "subs_then_auditok",
            "auditok",
            "subs_then_silero",
            "silero",
        ],
        default=None,
        help="Which voice activity detector to use for speech extraction "
        "(if using video / audio as a reference, default={}).".format(DEFAULT_VAD),
    )
    parser.add_argument(
        "--no-fix-framerate",
        action="store_true",
        help="If specified, subsync will not attempt to correct a framerate "
        "mismatch between reference and subtitles.",
    )
    parser.add_argument(
        "--serialize-speech",
        action="store_true",
        help="If specified, serialize reference speech to a numpy array.",
    )
    parser.add_argument(
        "--extract-subs-from-stream",
        "--extract-subtitles-from-stream",
        default=None,
        help="If specified, do not attempt sync; instead, just extract subtitles"
        " from the specified stream using the reference.",
    )
    parser.add_argument(
        "--suppress-output-if-offset-less-than",
        type=float,
        default=None,
        help="If specified, do not produce output if offset below provided threshold.",
    )
    parser.add_argument(
        "--ffmpeg-path",
        "--ffmpegpath",
        default=None,
        help="Where to look for ffmpeg and ffprobe. Uses the system PATH by default.",
    )
    parser.add_argument(
        "--log-dir-path",
        default=None,
        help=(
            "If provided, will save log file ffsubsync.log to this path "
            "(must be an existing directory)."
        ),
    )
    parser.add_argument(
        "--gss",
        action="store_true",
        help="If specified, use golden-section search to try to find"
        "the optimal framerate ratio between video and subtitles.",
    )
    parser.add_argument(
        "--strict",
        action="store_true",
        help="If specified, refuse to parse srt files with formatting issues.",
    )
    # Hidden flags used by integrations (VLC plugin / GUI wrapper).
    parser.add_argument("--vlc-mode", action="store_true", help=argparse.SUPPRESS)
    parser.add_argument("--gui-mode", action="store_true", help=argparse.SUPPRESS)
    parser.add_argument("--skip-sync", action="store_true", help=argparse.SUPPRESS)
def make_parser() -> argparse.ArgumentParser:
    """Build the complete argument parser for the ffsubsync command line."""
    parser = argparse.ArgumentParser(description="Synchronize subtitles with video.")
    for populate in (add_main_args_for_cli, add_cli_only_args):
        populate(parser)
    return parser
20,059 | import os
from contextlib import contextmanager
import logging
import io
import subprocess
import sys
from datetime import timedelta
from typing import cast, Callable, Dict, List, Optional, Union
import ffmpeg
import numpy as np
import tqdm
from ffsubsync.constants import (
DEFAULT_ENCODING,
DEFAULT_MAX_SUBTITLE_SECONDS,
DEFAULT_SCALE_FACTOR,
DEFAULT_START_SECONDS,
SAMPLE_RATE,
)
from ffsubsync.ffmpeg_utils import ffmpeg_bin_path, subprocess_args
from ffsubsync.generic_subtitles import GenericSubtitle
from ffsubsync.sklearn_shim import TransformerMixin
from ffsubsync.sklearn_shim import Pipeline
from ffsubsync.subtitle_parser import make_subtitle_parser
from ffsubsync.subtitle_transformers import SubtitleScaler
logger: logging.Logger = logging.getLogger(__name__)
def _make_auditok_detector(
    sample_rate: int, frame_rate: int, non_speech_label: float
) -> Callable[[bytes], np.ndarray]:
    """Build a speech detector backed by auditok's energy-based tokenizer.

    Args:
        sample_rate: detector output resolution in windows per second.
        frame_rate: sample rate of the incoming 16-bit mono PCM audio.
        non_speech_label: score assigned to windows outside speech tokens.

    Returns:
        A callable mapping raw PCM bytes to per-window speech scores in [0, 1].
    """
    try:
        from auditok import (
            BufferAudioSource,
            ADSFactory,
            AudioEnergyValidator,
            StreamTokenizer,
        )
    except ImportError as e:
        logger.error(
            """Error: auditok not installed!
        Consider installing it with `pip install auditok`. Note that auditok
        is GPLv3 licensed, which means that successfully importing it at
        runtime creates a derivative work that is GPLv3 licensed. For personal
        use this is fine, but note that any commercial use that relies on
        auditok must be open source as per the GPLv3!*
        *Not legal advice. Consult with a lawyer.
        """
        )
        raise e
    # 16-bit PCM: two bytes per audio frame.
    bytes_per_frame = 2
    frames_per_window = frame_rate // sample_rate
    validator = AudioEnergyValidator(sample_width=bytes_per_frame, energy_threshold=50)
    # Token bounds are expressed in analysis windows (block_dur below is
    # 1/sample_rate seconds), so these limits are in seconds of audio.
    tokenizer = StreamTokenizer(
        validator=validator,
        min_length=0.2 * sample_rate,
        max_length=int(5 * sample_rate),
        max_continuous_silence=0.25 * sample_rate,
    )

    def _detect(asegment: bytes) -> np.ndarray:
        asource = BufferAudioSource(
            data_buffer=asegment,
            sampling_rate=frame_rate,
            sample_width=bytes_per_frame,
            channels=1,
        )
        ads = ADSFactory.ads(audio_source=asource, block_dur=1.0 / sample_rate)
        ads.open()
        tokens = tokenizer.tokenize(ads)
        # Number of analysis windows, rounding up for a partial final window.
        length = (
            len(asegment) // bytes_per_frame + frames_per_window - 1
        ) // frames_per_window
        # Mark token boundaries with +1/-... deltas, then integrate with
        # cumsum so every window inside a token ends up at 1.0 and windows
        # outside end up at non_speech_label (clipped to [0, 1]).
        media_bstring = np.zeros(length + 1)
        for token in tokens:
            media_bstring[token[1]] = 1.0
            media_bstring[token[2] + 1] = non_speech_label - 1.0
        return np.clip(np.cumsum(media_bstring)[:-1], 0.0, 1.0)

    return _detect
20,060 | import os
from contextlib import contextmanager
import logging
import io
import subprocess
import sys
from datetime import timedelta
from typing import cast, Callable, Dict, List, Optional, Union
import ffmpeg
import numpy as np
import tqdm
from ffsubsync.constants import (
DEFAULT_ENCODING,
DEFAULT_MAX_SUBTITLE_SECONDS,
DEFAULT_SCALE_FACTOR,
DEFAULT_START_SECONDS,
SAMPLE_RATE,
)
from ffsubsync.ffmpeg_utils import ffmpeg_bin_path, subprocess_args
from ffsubsync.generic_subtitles import GenericSubtitle
from ffsubsync.sklearn_shim import TransformerMixin
from ffsubsync.sklearn_shim import Pipeline
from ffsubsync.subtitle_parser import make_subtitle_parser
from ffsubsync.subtitle_transformers import SubtitleScaler
def _make_webrtcvad_detector(
    sample_rate: int, frame_rate: int, non_speech_label: float
) -> Callable[[bytes], np.ndarray]:
    """Build a speech detector backed by WebRTC's VAD.

    Args:
        sample_rate: detector output resolution in windows per second.
        frame_rate: sample rate of the incoming 16-bit mono PCM audio.
        non_speech_label: score assigned to windows not classified as speech.

    Returns:
        A callable mapping raw PCM bytes to per-window speech scores.
    """
    import webrtcvad

    vad = webrtcvad.Vad()
    vad.set_mode(3)  # set non-speech pruning aggressiveness from 0 to 3
    window_duration = 1.0 / sample_rate  # duration in seconds
    frames_per_window = int(window_duration * frame_rate + 0.5)
    # 16-bit PCM: two bytes per audio frame.
    bytes_per_frame = 2

    def _detect(asegment: bytes) -> np.ndarray:
        media_bstring = []
        failures = 0
        for start in range(0, len(asegment) // bytes_per_frame, frames_per_window):
            stop = min(start + frames_per_window, len(asegment) // bytes_per_frame)
            try:
                is_speech = vad.is_speech(
                    asegment[start * bytes_per_frame : stop * bytes_per_frame],
                    sample_rate=frame_rate,
                )
            except Exception:
                # webrtcvad raises on malformed/partial windows; count and
                # treat the window as non-speech.
                is_speech = False
                failures += 1
            # webrtcvad has low recall on mode 3, so treat non-speech as "not sure"
            media_bstring.append(1.0 if is_speech else non_speech_label)
        return np.array(media_bstring)

    return _detect
20,061 | import os
from contextlib import contextmanager
import logging
import io
import subprocess
import sys
from datetime import timedelta
from typing import cast, Callable, Dict, List, Optional, Union
import ffmpeg
import numpy as np
import tqdm
from ffsubsync.constants import (
DEFAULT_ENCODING,
DEFAULT_MAX_SUBTITLE_SECONDS,
DEFAULT_SCALE_FACTOR,
DEFAULT_START_SECONDS,
SAMPLE_RATE,
)
from ffsubsync.ffmpeg_utils import ffmpeg_bin_path, subprocess_args
from ffsubsync.generic_subtitles import GenericSubtitle
from ffsubsync.sklearn_shim import TransformerMixin
from ffsubsync.sklearn_shim import Pipeline
from ffsubsync.subtitle_parser import make_subtitle_parser
from ffsubsync.subtitle_transformers import SubtitleScaler
logger: logging.Logger = logging.getLogger(__name__)
def _make_silero_detector(
    sample_rate: int, frame_rate: int, non_speech_label: float
) -> Callable[[bytes], np.ndarray]:
    """Build a speech detector backed by the Silero VAD torch model.

    Args:
        sample_rate: detector output resolution in windows per second.
        frame_rate: sample rate of the incoming 16-bit mono PCM audio.
        non_speech_label: score floor blended into the model probability.

    Returns:
        A callable mapping raw PCM bytes to per-window speech scores in [0, 1].
    """
    import torch

    window_duration = 1.0 / sample_rate  # duration in seconds
    frames_per_window = int(window_duration * frame_rate + 0.5)
    # The audio is converted to float32 samples below, so one "frame" is one
    # array element here (unlike the 2-byte PCM frames of the other VADs).
    bytes_per_frame = 1

    # Downloads/caches the model on first use via torch hub.
    model, _ = torch.hub.load(
        repo_or_dir="snakers4/silero-vad",
        model="silero_vad",
        force_reload=False,
        onnx=False,
    )

    # Log only the first model failure to avoid flooding the log.
    exception_logged = False

    def _detect(asegment) -> np.ndarray:
        # Normalize 16-bit PCM to float32 in [-1, 1).
        asegment = np.frombuffer(asegment, np.int16).astype(np.float32) / (1 << 15)
        asegment = torch.FloatTensor(asegment)
        media_bstring = []
        failures = 0
        for start in range(0, len(asegment) // bytes_per_frame, frames_per_window):
            stop = min(start + frames_per_window, len(asegment))
            try:
                speech_prob = model(
                    asegment[start * bytes_per_frame : stop * bytes_per_frame],
                    frame_rate,
                ).item()
            except Exception:
                nonlocal exception_logged
                if not exception_logged:
                    exception_logged = True
                    logger.exception("exception occurred during speech detection")
                speech_prob = 0.0
                failures += 1
            # Blend the model probability with the configured non-speech floor.
            media_bstring.append(1.0 - (1.0 - speech_prob) * (1.0 - non_speech_label))
        return np.array(media_bstring)

    return _detect
20,062 | import os
from contextlib import contextmanager
import logging
import io
import subprocess
import sys
from datetime import timedelta
from typing import cast, Callable, Dict, List, Optional, Union
import ffmpeg
import numpy as np
import tqdm
from ffsubsync.constants import (
DEFAULT_ENCODING,
DEFAULT_MAX_SUBTITLE_SECONDS,
DEFAULT_SCALE_FACTOR,
DEFAULT_START_SECONDS,
SAMPLE_RATE,
)
from ffsubsync.ffmpeg_utils import ffmpeg_bin_path, subprocess_args
from ffsubsync.generic_subtitles import GenericSubtitle
from ffsubsync.sklearn_shim import TransformerMixin
from ffsubsync.sklearn_shim import Pipeline
from ffsubsync.subtitle_parser import make_subtitle_parser
from ffsubsync.subtitle_transformers import SubtitleScaler
_PAIRED_NESTER: Dict[str, str] = {
"(": ")",
"{": "}",
"[": "]",
# FIXME: False positive sometimes when there are html tags, e.g. <i> Hello? </i>
# '<': '>',
}
def _is_metadata(content: str, is_beginning_or_end: bool) -> bool:
content = content.strip()
if len(content) == 0:
return True
if (
content[0] in _PAIRED_NESTER.keys()
and content[-1] == _PAIRED_NESTER[content[0]]
):
return True
if is_beginning_or_end:
if "english" in content.lower():
return True
if " - " in content:
return True
return False | null |
20,063 | import bpy
from .declarations import Macros, Operators, WorkSpaceTools
from .stateful_operator.utilities.keymap import tool_invoke_kmi
addon_keymaps = []
class Operators(str, Enum):
    """bl_idname identifiers for every operator registered by the add-on."""

    AddAngle = "view3d.slvs_add_angle"
    AddArc2D = "view3d.slvs_add_arc2d"
    AddCircle2D = "view3d.slvs_add_circle2d"
    AddCoincident = "view3d.slvs_add_coincident"
    AddDiameter = "view3d.slvs_add_diameter"
    AddDistance = "view3d.slvs_add_distance"
    AddEqual = "view3d.slvs_add_equal"
    AddHorizontal = "view3d.slvs_add_horizontal"
    AddLine2D = "view3d.slvs_add_line2d"
    AddLine3D = "view3d.slvs_add_line3d"
    AddMidPoint = "view3d.slvs_add_midpoint"
    AddParallel = "view3d.slvs_add_parallel"
    AddPerpendicular = "view3d.slvs_add_perpendicular"
    AddPoint2D = "view3d.slvs_add_point2d"
    AddPoint3D = "view3d.slvs_add_point3d"
    AddPresetTheme = "bgs.theme_preset_add"
    AddRatio = "view3d.slvs_add_ratio"
    AddRectangle = "view3d.slvs_add_rectangle"
    AddSketch = "view3d.slvs_add_sketch"
    AddTangent = "view3d.slvs_add_tangent"
    AddVertical = "view3d.slvs_add_vertical"
    AddWorkPlane = "view3d.slvs_add_workplane"
    AddWorkPlaneFace = "view3d.slvs_add_workplane_face"
    AlignWorkplaneCursor = "view3d.slvs_align_workplane_cursor"
    BatchSet = "view3d.slvs_batch_set"
    ContextMenu = "view3d.slvs_context_menu"
    Copy = "view3d.slvs_copy"
    DeleteConstraint = "view3d.slvs_delete_constraint"
    DeleteEntity = "view3d.slvs_delete_entity"
    InstallPackage = "view3d.slvs_install_package"
    Paste = "view3d.slvs_paste"
    Move = "view3d.slvs_move"
    Offset = "view3d.slvs_offset"
    NodeFill = "view3d.slvs_node_fill"
    NodeExtrude = "view3d.slvs_node_extrude"
    NodeArrayLinear = "view3d.slvs_node_array_linear"
    RegisterDrawCB = "view3d.slvs_register_draw_cb"
    Select = "view3d.slvs_select"
    SelectAll = "view3d.slvs_select_all"
    SelectBox = "view3d.slvs_select_box"
    SelectInvert = "view3d.slvs_select_invert"
    SelectExtendAll = "view3d.slvs_select_extend_all"
    SelectExtend = "view3d.slvs_select_extend"
    SetActiveSketch = "view3d.slvs_set_active_sketch"
    SetAllConstraintsVisibility = "view3d.slvs_set_all_constraints_visibility"
    ShowSolverState = "view3d.slvs_show_solver_state"
    Solve = "view3d.slvs_solve"
    Update = "view3d.slvs_update"
    Trim = "view3d.slvs_trim"
    Bevel = "view3d.slvs_bevel"
    Tweak = "view3d.slvs_tweak"
    TweakConstraintValuePos = "view3d.slvs_tweak_constraint_value_pos"
    UnregisterDrawCB = "view3d.slvs_unregister_draw_cb"
    WriteSelectionTexture = "view3d.slvs_write_selection_texture"


class WorkSpaceTools(str, Enum):
    """Identifiers of the workspace tools exposed in the 3D viewport toolbar."""

    AddArc2D = "sketcher.slvs_add_arc2d"
    AddCircle2D = "sketcher.slvs_add_circle2d"
    AddLine2D = "sketcher.slvs_add_line2d"
    AddLine3D = "sketcher.slvs_add_line3d"
    AddPoint2D = "sketcher.slvs_add_point2d"
    AddPoint3D = "sketcher.slvs_add_point3d"
    AddRectangle = "sketcher.slvs_add_rectangle"
    AddWorkplane = "sketcher.slvs_add_workplane"
    AddWorkplaneFace = "sketcher.slvs_add_workplane_face"
    Offset = "sketcher.slvs_offset"
    Select = "sketcher.slvs_select"
    Trim = "sketcher.slvs_trim"
    Bevel = "sketcher.slvs_bevel"
def register():
    """Install the add-on's global keymap items in the Object Mode keymap.

    Each created (keymap, keymap_item) pair is remembered in
    ``addon_keymaps`` so ``unregister`` can remove it again.
    """
    wm = bpy.context.window_manager
    # The addon keyconfig is unavailable e.g. when running in background mode.
    if wm.keyconfigs.addon:
        km = wm.keyconfigs.addon.keymaps.new(name="Object Mode", space_type="EMPTY")

        # Select
        kmi = km.keymap_items.new("wm.tool_set_by_id", "ESC", "PRESS", shift=True)
        kmi.properties.name = WorkSpaceTools.Select
        addon_keymaps.append((km, kmi))

        # Add Sketch
        kmi = km.keymap_items.new(
            Operators.AddSketch, "A", "PRESS", ctrl=True, shift=True
        )
        kmi.properties.wait_for_input = True
        addon_keymaps.append((km, kmi))

        # Leave Sketch
        kmi = km.keymap_items.new(
            Operators.SetActiveSketch, "X", "PRESS", ctrl=True, shift=True
        )
        # Index -1 deactivates the current sketch.
        kmi.properties.index = -1
        addon_keymaps.append((km, kmi))
20,064 | import bpy
from .declarations import Macros, Operators, WorkSpaceTools
from .stateful_operator.utilities.keymap import tool_invoke_kmi
addon_keymaps = []
def unregister():
    """Remove every keymap item registered by this add-on."""
    kc = bpy.context.window_manager.keyconfigs.addon
    if not kc:
        return
    for km, kmi in addon_keymaps:
        km.keymap_items.remove(kmi)
    addon_keymaps.clear()
20,065 | import logging
import bpy
import gpu
from bpy.types import Context, Operator
from bpy.utils import register_class, unregister_class
from . import global_data
from .utilities.preferences import use_experimental
from .declarations import Operators
def draw_selection_buffer(context: Context):
def ensure_selection_texture(context: Context):
    """Rebuild the selection buffer once if it has been flagged as stale."""
    if global_data.redraw_selection_buffer:
        draw_selection_buffer(context)
        global_data.redraw_selection_buffer = False
20,066 | import logging
import bpy
import gpu
from bpy.types import Context, Operator
from bpy.utils import register_class, unregister_class
from . import global_data
from .utilities.preferences import use_experimental
from .declarations import Operators
def update_elements(context: Context, force: bool = False):
    """
    Refresh the draw batches of sketch entities.

    With ``force`` every entity that has an ``update`` method is rebuilt;
    otherwise only dirty entities are.

    TODO: Avoid to always update batches and selection texture
    """
    entities = list(context.scene.sketcher.entities.all)
    for e in entities:
        if not hasattr(e, "update"):
            continue
        if not force and not e.is_dirty:
            continue
        e.update()

    def _get_msg():
        # NOTE(review): this runs after e.update() above, so it lists entities
        # that are *still* dirty at this point — confirm that is intended
        # rather than a snapshot taken before updating.
        msg = "Update geometry batches:"
        for e in entities:
            if not e.is_dirty:
                continue
            msg += "\n - " + str(e)
        return msg

    # Build the (potentially expensive) message only when DEBUG is enabled.
    if logger.isEnabledFor(logging.DEBUG):
        logger.debug(_get_msg())
def draw_elements(context: Context):
    """Draw all drawable sketch entities, iterating back to front."""
    entities = list(context.scene.sketcher.entities.all)
    for entity in reversed(entities):
        if not hasattr(entity, "draw"):
            continue
        entity.draw(context)
def use_experimental(setting, fallback):
    """Return preference *setting* in experimental mode, else *fallback*."""
    if is_experimental():
        return getattr(get_prefs(), setting)
    return fallback
def draw_cb():
    """POST_VIEW draw callback: refresh dirty entities and draw them."""
    context = bpy.context

    # The experimental force_redraw preference decides whether all entities
    # are rebuilt every frame; outside experimental mode always force.
    force = use_experimental("force_redraw", True)
    update_elements(context, force=force)
    draw_elements(context)

    # Geometry changed, so the selection buffer must be rebuilt on demand.
    global_data.redraw_selection_buffer = True
20,067 | import logging
import bpy
import gpu
from bpy.types import Context, Operator
from bpy.utils import register_class, unregister_class
from . import global_data
from .utilities.preferences import use_experimental
from .declarations import Operators
class View3D_OT_slvs_register_draw_cb(Operator):
    """Install the add-on's POST_VIEW draw callback in the 3D viewport."""

    bl_idname = Operators.RegisterDrawCB
    bl_label = "Register Draw Callback"

    def execute(self, context: Context):
        # Keep the handle in global_data so it can be removed later.
        global_data.draw_handle = bpy.types.SpaceView3D.draw_handler_add(
            draw_cb, (), "WINDOW", "POST_VIEW"
        )
        return {"FINISHED"}


class View3D_OT_slvs_unregister_draw_cb(Operator):
    """Remove the viewport draw callback installed above."""

    bl_idname = Operators.UnregisterDrawCB
    bl_label = ""

    def execute(self, context: Context):
        # NOTE(review): registration stores the handle as
        # ``global_data.draw_handle`` but this reads ``draw_handler`` and
        # calls ``remove_handle()`` — confirm global_data provides such a
        # wrapper; otherwise removal would normally go through
        # bpy.types.SpaceView3D.draw_handler_remove(handle, "WINDOW").
        global_data.draw_handler.remove_handle()
        return {"FINISHED"}


def register():
    """Register the draw-callback operators."""
    register_class(View3D_OT_slvs_register_draw_cb)
    register_class(View3D_OT_slvs_unregister_draw_cb)
20,068 | import logging
import bpy
import gpu
from bpy.types import Context, Operator
from bpy.utils import register_class, unregister_class
from . import global_data
from .utilities.preferences import use_experimental
from .declarations import Operators
class View3D_OT_slvs_register_draw_cb(Operator):
    """Install the add-on's POST_VIEW draw callback in the 3D viewport."""

    bl_idname = Operators.RegisterDrawCB
    bl_label = "Register Draw Callback"

    def execute(self, context: Context):
        # Keep the handle in global_data so it can be removed later.
        global_data.draw_handle = bpy.types.SpaceView3D.draw_handler_add(
            draw_cb, (), "WINDOW", "POST_VIEW"
        )
        return {"FINISHED"}


class View3D_OT_slvs_unregister_draw_cb(Operator):
    """Remove the viewport draw callback installed above."""

    bl_idname = Operators.UnregisterDrawCB
    bl_label = ""

    def execute(self, context: Context):
        # NOTE(review): the handle is stored as ``draw_handle`` at
        # registration time; confirm ``draw_handler.remove_handle()`` is the
        # intended removal API.
        global_data.draw_handler.remove_handle()
        return {"FINISHED"}


def unregister():
    """Unregister the draw-callback operators (reverse registration order)."""
    unregister_class(View3D_OT_slvs_unregister_draw_cb)
    unregister_class(View3D_OT_slvs_register_draw_cb)
20,069 | from pathlib import Path
from functools import cache
import gpu
import bpy
import bpy.utils.previews
from gpu_extras.batch import batch_for_shader
from bpy.app import background
from .declarations import Operators
from .shaders import Shaders
def get_folder_path():
    """Return the directory containing the add-on's icon resources."""
    here = Path(__file__).parent
    return here / "resources" / "icons"


def get_icon(name: str):
    """Return the full path of icon file *name* as a string."""
    icon_path = get_folder_path() / name
    return str(icon_path)
20,070 | from pathlib import Path
from functools import cache
import gpu
import bpy
import bpy.utils.previews
from gpu_extras.batch import batch_for_shader
from bpy.app import background
from .declarations import Operators
from .shaders import Shaders
icons = {}
def unload_preview_icons():
    """Release the bpy preview collection that holds the constraint icons."""
    global preview_icons
    if not preview_icons:
        return
    preview_icons.clear()
    bpy.utils.previews.remove(preview_icons)
    # Mark as unloaded so a later load starts from scratch.
    preview_icons = None


def unload():
    """Drop all cached GPU icon textures and the preview icon collection."""
    global icons
    unload_preview_icons()
    icons = {}
20,071 | from pathlib import Path
from functools import cache
import gpu
import bpy
import bpy.utils.previews
from gpu_extras.batch import batch_for_shader
from bpy.app import background
from .declarations import Operators
from .shaders import Shaders
preview_icons = None
def get_constraint_icon(operator: str):
    """Return the preview icon id for *operator*, or -1 when unavailable."""
    if not preview_icons:
        return -1
    icon = preview_icons.get(operator)
    return icon.icon_id if icon else -1
20,072 | from pathlib import Path
from functools import cache
import gpu
import bpy
import bpy.utils.previews
from gpu_extras.batch import batch_for_shader
from bpy.app import background
from .declarations import Operators
from .shaders import Shaders
icons = {}
def _get_shader():
    """Return the uniform-color image shader used for icon quads."""
    return Shaders.uniform_color_image_2d()


def _get_batch():
    """Build a unit quad batch (TRI_FAN) centered at the origin, with UVs."""
    return batch_for_shader(_get_shader(), "TRI_FAN", {
        "pos": ((-.5, -.5), (.5, -.5), (.5, .5), (-.5, .5)),
        "texCoord": ((0, 0), (1, 0), (1, 1), (0, 1)),
    })


def draw(type, color):
    """Draw the cached icon texture for constraint *type*, tinted by *color*.

    Silently does nothing when no texture is cached for *type*.
    """
    # NOTE(review): shader and batch are rebuilt on every call — presumably
    # intended to be memoized (functools.cache is imported at module level);
    # confirm whether the decorators were dropped.
    texture = icons.get(type)
    if not texture:
        return
    # Alpha blending so the icon's transparent background shows through.
    gpu.state.blend_set("ALPHA")
    shader, batch = _get_shader(), _get_batch()
    shader.bind()
    shader.uniform_float("color", color)
    shader.uniform_sampler("image", texture)
    batch.draw(shader)
    # Restore the default blend state.
    gpu.state.blend_set("NONE")
20,073 | from bpy.types import Context, UILayout
from .. import declarations
from . import VIEW3D_PT_sketcher_base
from .. import declarations
def sketch_selector(
    context: Context,
    layout: UILayout,
):
    """Draw the sketch add/leave selector row plus an update button."""
    row = layout.row(align=True)
    row.scale_y = 1.8

    active_sketch = context.scene.sketcher.active_sketch

    if not active_sketch:
        # No sketch active: offer to create one (waits for placement input).
        row.operator(
            declarations.Operators.AddSketch,
            icon="ADD"
        ).wait_for_input = True
    else:
        # Sketch active: depressed "leave" button; index -1 deactivates it.
        row.operator(
            declarations.Operators.SetActiveSketch,
            text="Leave: " + active_sketch.name,
            icon="BACK",
            depress=True,
        ).index = -1
        row.active = True

    row.operator(declarations.Operators.Update, icon="FILE_REFRESH", text="")
20,074 | from bpy.types import Context, UILayout
from .. import declarations
from .. import types
from . import VIEW3D_PT_sketcher_base
from .. import declarations
The provided code snippet includes necessary dependencies for implementing the `draw_constraint_listitem` function. Write a Python function `def draw_constraint_listitem( context: Context, layout: UILayout, constraint: types.GenericConstraint )` to solve the following problem:
Creates a single row inside the ``layout`` describing the ``constraint``.
Here is the function:
def draw_constraint_listitem(
    context: Context, layout: UILayout, constraint: types.GenericConstraint
):
    """
    Creates a single row inside the ``layout`` describing
    the ``constraint``.

    Args:
        context: current Blender context, used to resolve the constraint index.
        layout: parent layout the row is appended to.
        constraint: the constraint to display.
    """
    index = context.scene.sketcher.constraints.get_index(constraint)
    row = layout.row()

    # Visible/Hidden property
    row.prop(
        constraint,
        "visible",
        icon_only=True,
        icon=("HIDE_OFF" if constraint.visible else "HIDE_ON"),
        emboss=False,
    )

    # Failed hint
    row.label(
        text="",
        icon=("ERROR" if constraint.failed else "CHECKMARK"),
    )

    # Label
    row.prop(constraint, "name", text="")

    # Constraint Values
    middle_sub = row.row()
    for constraint_prop in constraint.props:
        middle_sub.prop(constraint, constraint_prop, text="")

    # Context menu, shows constraint name
    props = row.operator(
        declarations.Operators.ContextMenu,
        text="",
        icon="OUTLINER_DATA_GP_LAYER",
        emboss=False,
    )
    props.type = constraint.type
    props.index = index
    props.highlight_hover = True
    props.highlight_active = True
    props.highlight_members = True

    # Delete operator
    props = row.operator(
        declarations.Operators.DeleteConstraint,
        text="",
        icon="X",
        emboss=False,
    )
    props.type = constraint.type
    props.index = index
    props.highlight_hover = True
    props.highlight_members = True
20,075 | from bpy.types import Menu
from ..declarations import Operators, Menus
from typing import Iterable
def _get_value_icon(collection: Iterable, property: str, default: bool) -> bool:
values = [getattr(item, property) for item in collection]
if all(values):
return False, "CHECKBOX_HLT"
if not any(values):
return True, "CHECKBOX_DEHLT"
return default, "SELECT_SUBTRACT" | null |
20,076 | import gpu
from bpy.types import Gizmo, GizmoGroup
from .. import global_data
from ..declarations import Gizmos, GizmoGroups
from ..draw_handler import ensure_selection_texture
from ..utilities.index import rgb_to_index
from .utilities import context_mode_check
def _spiral(N, M):
x,y = 0,0
dx, dy = 0, -1
for dumb in range(N*M):
if abs(x) == abs(y) and [dx,dy] != [1,0] or x>0 and y == 1-x:
dx, dy = -dy, dx # corner, change direction
if abs(x)>N/2 or abs(y)>M/2: # non-square
dx, dy = -dy, dx # change direction
x, y = -y+dx, x+dy # jump
yield x, y
x, y = x+dx, y+dy
The provided code snippet includes necessary dependencies for implementing the `get_spiral_coords` function. Write a Python function `def get_spiral_coords(X: int, Y: int, width: int, height: int, radius: int = 0)` to solve the following problem:
Returns a list of coordinates to check, starting from the given position and spiraling outward.
Here is the function:
def get_spiral_coords(X: int, Y: int, width: int, height: int, radius: int = 0):
    """Yield coordinates around (X, Y), spiraling outward, clipped to bounds."""
    for dx, dy in _spiral(radius + 1, radius + 1):
        px, py = X + dx, Y + dy
        if 0 <= px <= width and 0 <= py <= height:
            yield (px, py)
20,077 | import math
import blf
import gpu
from bpy.types import Gizmo, GizmoGroup
from mathutils import Vector, Matrix
from .. import icon_manager, units
from ..declarations import Gizmos, GizmoGroups, Operators
from ..utilities.preferences import get_prefs
from ..utilities.view import get_2d_coords
from .base import ConstraintGizmo
from .utilities import Color, get_color, set_gizmo_colors
from ..utilities.view import get_scale_from_pos
def _get_formatted_value(context, constr):
    """Return the constraint's value formatted with units ('' when unitless)."""
    value = constr.value
    unit = constr.rna_type.properties["value"].unit
    if unit == "ROTATION":
        return units.format_angle(value)
    if unit == "LENGTH":
        formatted = units.format_distance(value)
        # Diameter constraints in radius mode get an "R" prefix.
        if constr.type == "DIAMETER" and constr.setting:
            return "R" + formatted
        return formatted
    return ""
20,078 | import math
from enum import Enum, auto
from mathutils import Matrix
from ..model.types import GenericConstraint
from ..utilities.constants import QUARTER_TURN
from ..utilities.preferences import get_prefs
def get_constraint_color_type(constraint: GenericConstraint):
def get_color(color_type: Color, highlit: bool):
def set_gizmo_colors(gz, constraint):
    """Apply the themed base and highlight colors of *constraint* to gizmo *gz*."""
    color_type = get_constraint_color_type(constraint)
    for color_attr, alpha_attr, highlit in (
        ("color", "alpha", False),
        ("color_highlight", "alpha_highlight", True),
    ):
        rgba = get_color(color_type, highlit=highlit)
        setattr(gz, color_attr, rgba[0:-1])
        setattr(gz, alpha_attr, rgba[-1])
20,079 | import math
from enum import Enum, auto
from mathutils import Matrix
from ..model.types import GenericConstraint
from ..utilities.constants import QUARTER_TURN
from ..utilities.preferences import get_prefs
QUARTER_TURN = tau / 4
def draw_arrow_shape(target, shoulder, width, is_3d=False):
    """Return line-pair vertices forming an arrow head pointing at *target*."""
    side = shoulder - target
    side.rotate(Matrix.Rotation(QUARTER_TURN, (3 if is_3d else 2), "Z"))
    side.length = abs(width / 2)
    # Three segments: left edge -> tip, tip -> right edge, right -> left.
    return (
        (shoulder + side),
        target,
        target,
        (shoulder - side),
        (shoulder - side),
        (shoulder + side),
    )
20,080 | import math
from enum import Enum, auto
from mathutils import Matrix
from ..model.types import GenericConstraint
from ..utilities.constants import QUARTER_TURN
from ..utilities.preferences import get_prefs
def get_prefs():
    """Return this add-on's preferences instance."""
    return bpy.context.preferences.addons[get_name()].preferences
def get_arrow_size(dist, scale):
    """Return (length, half_length) of a dimension arrow head.

    Size follows the view scale and user preference but is capped at
    two-thirds of *dist*; the sign follows *dist*.
    """
    preferred = scale * 0.01 * get_prefs().arrow_scale
    size = math.copysign(min(preferred, abs(dist * 0.67)), dist)
    return size, size / 2
20,081 | import math
from enum import Enum, auto
from mathutils import Matrix
from ..model.types import GenericConstraint
from ..utilities.constants import QUARTER_TURN
from ..utilities.preferences import get_prefs
def get_prefs():
    """Return this add-on's preferences instance."""
    return bpy.context.preferences.addons[get_name()].preferences
def get_overshoot(scale, dir):
    """Return the extension-line overshoot, signed opposite to *dir*."""
    if dir == 0:
        return 0
    # use factor of 0.005 for one-half arrowhead
    return -math.copysign(scale * 0.005 * get_prefs().arrow_scale, dir)
20,082 | import math
from enum import Enum, auto
from mathutils import Matrix
from ..model.types import GenericConstraint
from ..utilities.constants import QUARTER_TURN
from ..utilities.preferences import get_prefs
def context_mode_check(context, widget_group):
    """Return True when an active workspace tool uses *widget_group* in the
    current mode.

    Otherwise schedule the gizmo group type for delayed unlinking and
    return False.
    """
    mode = context.mode
    for tool in context.workspace.tools:
        if tool.widget == widget_group and tool.mode == mode:
            return True
    context.window_manager.gizmo_group_type_unlink_delayed(widget_group)
    return False
20,083 | import bpy
import logging
logger = logging.getLogger(__name__)
def update_pointers(scene, index_old, index_new):
    """Replace every reference to entity index *index_old* with *index_new*."""
    logger.debug("Update references {} -> {}".format(index_old, index_new))
    # NOTE: this should go through all entity pointers and update them if necessary.
    # It might be possible to use the msgbus to notify and update the IntProperty pointers
    sketcher = scene.sketcher
    if sketcher.active_sketch_i == index_old:
        logger.debug(
            "Update reference {} of {} to {}: ".format(
                "active_sketch", sketcher, index_new
            )
        )
        sketcher.active_sketch_i = index_new

    for item in sketcher.all:
        if not hasattr(item, "update_pointers"):
            continue
        item.update_pointers(index_old, index_new)

    sketcher.purge_stale_data()
The provided code snippet includes necessary dependencies for implementing the `recalc_pointers` function. Write a Python function `def recalc_pointers(scene)` to solve the following problem:
Updates the type index of entities while keeping the local index unchanged.
Here is the function:
def recalc_pointers(scene):
    """Updates type index of entities keeping local index as is"""
    # TODO: Move to utilities.data_handling
    # Function-level import, presumably to avoid a circular module import.
    from .model.utilities import update_pointers

    msg = ""
    entities = list(scene.sketcher.entities.all)
    for e in reversed(entities):
        i = e.slvs_index
        # scene.sketcher.entities._set_index(e)
        scene.sketcher.entities.recalc_type_index(e)
        if i != e.slvs_index:
            msg += "\n - {}: {} -> {}".format(e, i, e.slvs_index)
            # NOTE(review): assumed update_pointers runs only when the index
            # actually changed (the source formatting is ambiguous) — confirm.
            update_pointers(scene, i, e.slvs_index)

    if msg:
        logger.debug("Update entity indices:" + msg)
20,084 | import logging
from typing import List
from bpy.types import Scene
from ..model.types import SlvsGenericEntity
The provided code snippet includes necessary dependencies for implementing the `point_entity_mapping` function. Write a Python function `def point_entity_mapping(scene)` to solve the following problem:
Get a mapping of the entities attached to each connection point.
Here is the function:
def point_entity_mapping(scene):
    """Return (points, users): for each connection point, the entities using it."""
    points = []
    users_per_point = []
    for entity in scene.sketcher.entities.all:
        # Skip points themselves and entities without connection points.
        if entity.is_point() or not hasattr(entity, "connection_points"):
            continue
        for point in entity.connection_points():
            if not point.is_point():
                continue
            if point not in points:
                points.append(point)
                users_per_point.append([])
            users = users_per_point[points.index(point)]
            if entity not in users:
                users.append(entity)
    assert len(points) == len(users_per_point)
    return points, users_per_point
20,085 | import logging
from typing import List
from bpy.types import Scene
from ..model.types import SlvsGenericEntity
def shares_point(seg_1, seg_2):
    """Return True when the two segments have a connection point in common."""
    points_1 = seg_1.connection_points()
    return any(p in points_1 for p in seg_2.connection_points())
20,086 | from mathutils import Vector
from math import sin, cos
from .constants import FULL_TURN
def pol2cart(radius: float, angle: float) -> Vector:
    """Convert polar coordinates (radius, angle) to a 2D cartesian Vector."""
    return Vector((radius * cos(angle), radius * sin(angle)))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.