id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
14,554 | import os
import h5py
import logging
import tqdm
import subprocess
import os.path as osp
import numpy as np
from pathlib import Path
from src.utils.colmap.read_write_model import CAMERA_MODEL_NAMES, Image, read_cameras_binary, read_images_binary
from src.utils.colmap.database import COLMAPDatabase
class COLMAPDatabase(sqlite3.Connection):
    """sqlite3.Connection subclass for reading/writing a COLMAP database.

    NOTE(review): relies on module-level names defined elsewhere in the
    original file and not visible here: ``sqlite3``, the ``CREATE_*`` SQL
    schema strings, ``array_to_blob`` and ``image_ids_to_pair_id``.
    """

    # NOTE(review): not decorated with @staticmethod, so this works only
    # when invoked on the class itself, i.e. COLMAPDatabase.connect(path).
    def connect(database_path):
        # `factory` makes the returned connection an instance of this class.
        return sqlite3.connect(str(database_path), factory=COLMAPDatabase)

    def __init__(self, *args, **kwargs):
        super(COLMAPDatabase, self).__init__(*args, **kwargs)
        # One lazy table-creation helper per COLMAP schema fragment.
        self.create_tables = lambda: self.executescript(CREATE_ALL)
        self.create_cameras_table = \
            lambda: self.executescript(CREATE_CAMERAS_TABLE)
        self.create_descriptors_table = \
            lambda: self.executescript(CREATE_DESCRIPTORS_TABLE)
        self.create_images_table = \
            lambda: self.executescript(CREATE_IMAGES_TABLE)
        self.create_two_view_geometries_table = \
            lambda: self.executescript(CREATE_TWO_VIEW_GEOMETRIES_TABLE)
        self.create_keypoints_table = \
            lambda: self.executescript(CREATE_KEYPOINTS_TABLE)
        self.create_matches_table = \
            lambda: self.executescript(CREATE_MATCHES_TABLE)
        self.create_name_index = lambda: self.executescript(CREATE_NAME_INDEX)

    def add_camera(self, model, width, height, params,
                   prior_focal_length=False, camera_id=None):
        """Insert a camera row; returns the assigned camera id."""
        # Intrinsic parameters are stored as a float64 blob.
        params = np.asarray(params, np.float64)
        cursor = self.execute(
            "INSERT INTO cameras VALUES (?, ?, ?, ?, ?, ?)",
            (camera_id, model, width, height, array_to_blob(params),
             prior_focal_length))
        return cursor.lastrowid

    def add_image(self, name, camera_id,
                  prior_q=np.zeros(4), prior_t=np.zeros(3), image_id=None):
        """Insert an image row with optional pose priors (quaternion q, translation t)."""
        cursor = self.execute(
            "INSERT INTO images VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
            (image_id, name, camera_id, prior_q[0], prior_q[1], prior_q[2],
             prior_q[3], prior_t[0], prior_t[1], prior_t[2]))
        return cursor.lastrowid

    def add_keypoints(self, image_id, keypoints):
        """Store a (N, 2|4|6) float32 keypoint array for an image."""
        assert(len(keypoints.shape) == 2)
        # COLMAP accepts 2 (x, y), 4 (+scale, orientation) or 6 (affine) columns.
        assert(keypoints.shape[1] in [2, 4, 6])
        keypoints = np.asarray(keypoints, np.float32)
        self.execute(
            "INSERT INTO keypoints VALUES (?, ?, ?, ?)",
            (image_id,) + keypoints.shape + (array_to_blob(keypoints),))

    def add_descriptors(self, image_id, descriptors):
        """Store a uint8 descriptor matrix for an image."""
        descriptors = np.ascontiguousarray(descriptors, np.uint8)
        self.execute(
            "INSERT INTO descriptors VALUES (?, ?, ?, ?)",
            (image_id,) + descriptors.shape + (array_to_blob(descriptors),))

    def add_matches(self, image_id1, image_id2, matches):
        """Store (N, 2) index matches; pair key is order-independent."""
        assert(len(matches.shape) == 2)
        assert(matches.shape[1] == 2)
        # Matches are keyed with the smaller image id first; swap the
        # columns so correspondences stay consistent with that ordering.
        if image_id1 > image_id2:
            matches = matches[:,::-1]
        pair_id = image_ids_to_pair_id(image_id1, image_id2)
        matches = np.asarray(matches, np.uint32)
        self.execute(
            "INSERT INTO matches VALUES (?, ?, ?, ?)",
            (pair_id,) + matches.shape + (array_to_blob(matches),))

    def add_two_view_geometry(self, image_id1, image_id2, matches,
                              F=np.eye(3), E=np.eye(3), H=np.eye(3), config=2):
        """Store verified matches plus F/E/H matrices for an image pair."""
        assert(len(matches.shape) == 2)
        assert(matches.shape[1] == 2)
        if image_id1 > image_id2:
            matches = matches[:,::-1]
        pair_id = image_ids_to_pair_id(image_id1, image_id2)
        matches = np.asarray(matches, np.uint32)
        F = np.asarray(F, dtype=np.float64)
        E = np.asarray(E, dtype=np.float64)
        H = np.asarray(H, dtype=np.float64)
        self.execute(
            "INSERT INTO two_view_geometries VALUES (?, ?, ?, ?, ?, ?, ?, ?)",
            (pair_id,) + matches.shape + (array_to_blob(matches), config,
             array_to_blob(F), array_to_blob(E), array_to_blob(H)))
The provided code snippet includes necessary dependencies for implementing the `import_features` function. Write a Python function `def import_features(image_ids, database_path, feature_path)` to solve the following problem:
Import keypoints info into COLMAP database.
Here is the function:
def import_features(image_ids, database_path, feature_path):
    """Import keypoints info into COLMAP database.

    Args:
        image_ids: dict mapping image name -> COLMAP image id.
        database_path: path of the COLMAP database to write to.
        feature_path: h5 file with a 'keypoints' dataset per image name.
    """
    logging.info("Importing features into the database...")
    feature_file = h5py.File(str(feature_path), 'r')
    db = COLMAPDatabase.connect(database_path)
    # try/finally so the h5 file and the database are closed even when an
    # image name is missing from the feature file (original leaked both).
    try:
        for image_name, image_id in tqdm.tqdm(image_ids.items()):
            keypoints = feature_file[image_name]['keypoints'].__array__()
            # COLMAP's convention places the pixel center at +0.5.
            keypoints += 0.5
            db.add_keypoints(image_id, keypoints)
        db.commit()
    finally:
        feature_file.close()
        db.close()
14,555 | import os
import h5py
import logging
import tqdm
import subprocess
import os.path as osp
import numpy as np
from pathlib import Path
from src.utils.colmap.read_write_model import CAMERA_MODEL_NAMES, Image, read_cameras_binary, read_images_binary
from src.utils.colmap.database import COLMAPDatabase
def names_to_pair(name0, name1):
    """Build the canonical match-file key for an (image0, image1) pair.

    Slashes in each name are replaced by dashes and the two sanitized
    names are joined with an underscore.
    """
    return f"{name0.replace('/', '-')}_{name1.replace('/', '-')}"
class COLMAPDatabase(sqlite3.Connection):
    """COLMAP database wrapper built on sqlite3.Connection.

    Exposes schema-creation helpers and insert methods for cameras,
    images, keypoints, descriptors, matches and two-view geometries.
    NOTE(review): ``sqlite3``, the ``CREATE_*`` schema strings,
    ``array_to_blob`` and ``image_ids_to_pair_id`` are defined elsewhere
    in the original module and are not visible in this fragment.
    """

    # NOTE(review): missing @staticmethod — call as COLMAPDatabase.connect(path).
    def connect(database_path):
        return sqlite3.connect(str(database_path), factory=COLMAPDatabase)

    def __init__(self, *args, **kwargs):
        super(COLMAPDatabase, self).__init__(*args, **kwargs)
        # Lazy helpers: each one creates the corresponding COLMAP table.
        self.create_tables = lambda: self.executescript(CREATE_ALL)
        self.create_cameras_table = \
            lambda: self.executescript(CREATE_CAMERAS_TABLE)
        self.create_descriptors_table = \
            lambda: self.executescript(CREATE_DESCRIPTORS_TABLE)
        self.create_images_table = \
            lambda: self.executescript(CREATE_IMAGES_TABLE)
        self.create_two_view_geometries_table = \
            lambda: self.executescript(CREATE_TWO_VIEW_GEOMETRIES_TABLE)
        self.create_keypoints_table = \
            lambda: self.executescript(CREATE_KEYPOINTS_TABLE)
        self.create_matches_table = \
            lambda: self.executescript(CREATE_MATCHES_TABLE)
        self.create_name_index = lambda: self.executescript(CREATE_NAME_INDEX)

    def add_camera(self, model, width, height, params,
                   prior_focal_length=False, camera_id=None):
        """Insert a camera; params are serialized as a float64 blob."""
        params = np.asarray(params, np.float64)
        cursor = self.execute(
            "INSERT INTO cameras VALUES (?, ?, ?, ?, ?, ?)",
            (camera_id, model, width, height, array_to_blob(params),
             prior_focal_length))
        return cursor.lastrowid

    def add_image(self, name, camera_id,
                  prior_q=np.zeros(4), prior_t=np.zeros(3), image_id=None):
        """Insert an image; prior_q/prior_t are optional pose priors."""
        cursor = self.execute(
            "INSERT INTO images VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
            (image_id, name, camera_id, prior_q[0], prior_q[1], prior_q[2],
             prior_q[3], prior_t[0], prior_t[1], prior_t[2]))
        return cursor.lastrowid

    def add_keypoints(self, image_id, keypoints):
        """Insert a (N, 2|4|6) keypoint array, stored as float32."""
        assert(len(keypoints.shape) == 2)
        assert(keypoints.shape[1] in [2, 4, 6])
        keypoints = np.asarray(keypoints, np.float32)
        self.execute(
            "INSERT INTO keypoints VALUES (?, ?, ?, ?)",
            (image_id,) + keypoints.shape + (array_to_blob(keypoints),))

    def add_descriptors(self, image_id, descriptors):
        """Insert a uint8 descriptor matrix for an image."""
        descriptors = np.ascontiguousarray(descriptors, np.uint8)
        self.execute(
            "INSERT INTO descriptors VALUES (?, ?, ?, ?)",
            (image_id,) + descriptors.shape + (array_to_blob(descriptors),))

    def add_matches(self, image_id1, image_id2, matches):
        """Insert (N, 2) matches; column order follows the pair-id ordering."""
        assert(len(matches.shape) == 2)
        assert(matches.shape[1] == 2)
        # Pair ids are canonicalized with the smaller image id first.
        if image_id1 > image_id2:
            matches = matches[:,::-1]
        pair_id = image_ids_to_pair_id(image_id1, image_id2)
        matches = np.asarray(matches, np.uint32)
        self.execute(
            "INSERT INTO matches VALUES (?, ?, ?, ?)",
            (pair_id,) + matches.shape + (array_to_blob(matches),))

    def add_two_view_geometry(self, image_id1, image_id2, matches,
                              F=np.eye(3), E=np.eye(3), H=np.eye(3), config=2):
        """Insert verified matches with fundamental/essential/homography matrices."""
        assert(len(matches.shape) == 2)
        assert(matches.shape[1] == 2)
        if image_id1 > image_id2:
            matches = matches[:,::-1]
        pair_id = image_ids_to_pair_id(image_id1, image_id2)
        matches = np.asarray(matches, np.uint32)
        F = np.asarray(F, dtype=np.float64)
        E = np.asarray(E, dtype=np.float64)
        H = np.asarray(H, dtype=np.float64)
        self.execute(
            "INSERT INTO two_view_geometries VALUES (?, ?, ?, ?, ?, ?, ?, ?)",
            (pair_id,) + matches.shape + (array_to_blob(matches), config,
             array_to_blob(F), array_to_blob(E), array_to_blob(H)))
The provided code snippet includes necessary dependencies for implementing the `import_matches` function. Write a Python function `def import_matches(image_ids, database_path, pairs_path, matches_path, feature_path, min_match_score=None, skip_geometric_verification=False)` to solve the following problem:
Import matches info into COLMAP database.
Here is the function:
def import_matches(image_ids, database_path, pairs_path, matches_path, feature_path,
                   min_match_score=None, skip_geometric_verification=False):
    """Import matches info into COLMAP database.

    Args:
        image_ids: dict mapping image name -> COLMAP image id.
        database_path: COLMAP database to write to.
        pairs_path: text file with one 'name0 name1' pair per line.
        matches_path: h5 file holding 'matches0' (and optionally
            'matching_scores0') per pair key.
        feature_path: unused here; kept for interface compatibility.
        min_match_score: if set, drop matches with score <= this value.
        skip_geometric_verification: if True, also register the matches as
            two-view geometry so COLMAP's verification step can be skipped.

    Raises:
        ValueError: if a pair from pairs_path is absent from the match file.
    """
    logging.info("Importing matches into the database...")
    with open(str(pairs_path), 'r') as f:
        # Skip blank lines (e.g. a trailing newline), which would otherwise
        # crash the 'name0, name1' unpacking below.
        pairs = [line.split(' ') for line in f.read().split('\n') if line]
    match_file = h5py.File(str(matches_path), 'r')
    db = COLMAPDatabase.connect(database_path)
    # Close the h5 file and the database even on error (original leaked both).
    try:
        matched = set()
        for name0, name1 in tqdm.tqdm(pairs):
            id0, id1 = image_ids[name0], image_ids[name1]
            # Each unordered pair is imported at most once.
            if len({(id0, id1), (id1, id0)} & matched) > 0:
                continue
            pair = names_to_pair(name0, name1)
            if pair not in match_file:
                raise ValueError(
                    f'Could not find pair {(name0, name1)}... '
                    'Maybe you matched with a different list of pairs? '
                    f'Reverse in file: {names_to_pair(name0, name1) in match_file}.'
                )
            matches = match_file[pair]['matches0'].__array__()
            valid = matches > -1  # -1 marks "no match" for a keypoint
            if min_match_score:
                scores = match_file[pair]['matching_scores0'].__array__()
                valid = valid & (scores > min_match_score)
            # Rows of (keypoint index in image0, matched index in image1).
            matches = np.stack([np.where(valid)[0], matches[valid]], -1)
            db.add_matches(id0, id1, matches)
            matched |= {(id0, id1), (id1, id0)}
            if skip_geometric_verification:
                # Register raw matches as verified geometry up front.
                db.add_two_view_geometry(id0, id1, matches)
        db.commit()
    finally:
        match_file.close()
        db.close()
14,556 | import os
import h5py
import logging
import tqdm
import subprocess
import os.path as osp
import numpy as np
from pathlib import Path
from src.utils.colmap.read_write_model import CAMERA_MODEL_NAMES, Image, read_cameras_binary, read_images_binary
from src.utils.colmap.database import COLMAPDatabase
The provided code snippet includes necessary dependencies for implementing the `run_triangulation` function. Write a Python function `def run_triangulation(colmap_path, model_path, database_path, image_dir, empty_model)` to solve the following problem:
run triangulation on given database
Here is the function:
def run_triangulation(colmap_path, model_path, database_path, image_dir, empty_model):
    """Run COLMAP point triangulation on the given database.

    Invokes `colmap point_triangulator` with camera refinement disabled,
    then parses `colmap model_analyzer` output into a stats dict.

    Returns:
        dict with keys such as num_reg_images, num_sparse_points,
        num_observations, mean_track_length, num_observations_per_image
        and mean_reproj_error (only keys found in the output are set).
    """
    logging.info('Running the triangulation...')
    cmd = [
        str(colmap_path), 'point_triangulator',
        '--database_path', str(database_path),
        '--image_path', str(image_dir),
        '--input_path', str(empty_model),
        '--output_path', str(model_path),
        # Keep intrinsics fixed: triangulate only, never refine cameras.
        '--Mapper.ba_refine_focal_length', '0',
        '--Mapper.ba_refine_principal_point', '0',
        '--Mapper.ba_refine_extra_params', '0'
    ]
    logging.info(' '.join(cmd))
    ret = subprocess.call(cmd)
    if ret != 0:
        # Typo fix: 'existing' -> 'exiting'.
        logging.warning('Problem with point_triangulator, exiting.')
        exit(ret)

    # model_path may be a Path object; wrap in str() like the other args.
    stats_raw = subprocess.check_output(
        [str(colmap_path), 'model_analyzer', '--path', str(model_path)]
    )
    stats_raw = stats_raw.decode().split('\n')
    stats = dict()
    for stat in stats_raw:
        if stat.startswith('Register images'):
            stats['num_reg_images'] = int(stat.split()[-1])
        elif stat.startswith('Points'):
            stats['num_sparse_points'] = int(stat.split()[-1])
        elif stat.startswith('Observation'):
            stats['num_observations'] = int(stat.split()[-1])
        elif stat.startswith('Mean track length'):
            stats['mean_track_length'] = float(stat.split()[-1])
        elif stat.startswith('Mean observation per image'):
            stats['num_observations_per_image'] = float(stat.split()[-1])
        elif stat.startswith('Mean reprojection error'):
            # The value carries a 2-char unit suffix (e.g. 'px'); strip it.
            stats['mean_reproj_error'] = float(stat.split()[-1][:-2])
    return stats
14,557 | import h5py
import torch
import logging
import tqdm
import os.path as osp
confs = {
'superglue': {
'output': 'matches-spg',
'conf': {
'descriptor_dim': 256,
'weights': 'outdoor',
'match_threshold': 0.7
}
}
}
def names_to_pair(name0, name1):
    """Return the match-file group key for an image pair (slashes -> dashes)."""
    sanitized = (name.replace('/', '-') for name in (name0, name1))
    return '_'.join(sanitized)
class SuperGlue(nn.Module):
    """SuperGlue feature matching middle-end

    Given two sets of keypoints and locations, we determine the
    correspondences by:
      1. Keypoint Encoding (normalization + visual feature and location fusion)
      2. Graph Neural Network with multiple self and cross-attention layers
      3. Final projection layer
      4. Optimal Transport Layer (a differentiable Hungarian matching algorithm)
      5. Thresholding matrix based on mutual exclusivity and a match_threshold

    The correspondence ids use -1 to indicate non-matching points.

    Paul-Edouard Sarlin, Daniel DeTone, Tomasz Malisiewicz, and Andrew
    Rabinovich. SuperGlue: Learning Feature Matching with Graph Neural
    Networks. In CVPR, 2020. https://arxiv.org/abs/1911.11763

    NOTE(review): KeypointEncoder, AttentionalGNN, normalize_keypoints,
    log_optimal_transport and arange_like are defined elsewhere in the
    original module.
    """
    default_config = {
        'descriptor_dim': 256,
        'weights': 'indoor',
        'keypoint_encoder': [32, 64, 128, 256],
        'GNN_layers': ['self', 'cross'] * 9,
        'sinkhorn_iterations': 100,
        'match_threshold': 0.2,
    }

    def __init__(self, config):
        super().__init__()
        # User config overrides the defaults key by key.
        self.config = {**self.default_config, **config}
        self.kenc = KeypointEncoder(
            self.config['descriptor_dim'], self.config['keypoint_encoder'])
        self.gnn = AttentionalGNN(
            self.config['descriptor_dim'], self.config['GNN_layers'])
        self.final_proj = nn.Conv1d(
            self.config['descriptor_dim'], self.config['descriptor_dim'],
            kernel_size=1, bias=True)
        # Learnable score for the "dustbin" (unmatched) row/column.
        bin_score = torch.nn.Parameter(torch.tensor(1.))
        self.register_parameter('bin_score', bin_score)

    def forward(self, data):
        """Run SuperGlue on a pair of keypoints and descriptors"""
        desc0, desc1 = data['descriptors0'], data['descriptors1']
        kpts0, kpts1 = data['keypoints0'], data['keypoints1']

        if kpts0.shape[1] == 0 or kpts1.shape[1] == 0:  # no keypoints
            # Degenerate case: emit all-unmatched outputs of the right shape.
            shape0, shape1 = kpts0.shape[:-1], kpts1.shape[:-1]
            return {
                'matches0': kpts0.new_full(shape0, -1, dtype=torch.int),
                'matches1': kpts1.new_full(shape1, -1, dtype=torch.int),
                'matching_scores0': kpts0.new_zeros(shape0),
                'matching_scores1': kpts1.new_zeros(shape1),
            }

        # Keypoint normalization.
        kpts0 = normalize_keypoints(kpts0, data['image0'].shape)
        kpts1 = normalize_keypoints(kpts1, data['image1'].shape)

        # Keypoint MLP encoder: fuse position + detection score into descriptors.
        desc0 = desc0 + self.kenc(kpts0, data['scores0'])
        desc1 = desc1 + self.kenc(kpts1, data['scores1'])

        # Multi-layer Transformer network (alternating self/cross attention).
        desc0, desc1 = self.gnn(desc0, desc1)

        # Final MLP projection.
        mdesc0, mdesc1 = self.final_proj(desc0), self.final_proj(desc1)

        # Compute matching descriptor distance (scaled dot-product scores).
        scores = torch.einsum('bdn,bdm->bnm', mdesc0, mdesc1)
        scores = scores / self.config['descriptor_dim']**.5

        # Run the optimal transport (Sinkhorn with a dustbin row/column).
        scores = log_optimal_transport(
            scores, self.bin_score,
            iters=self.config['sinkhorn_iterations'])

        # Get the matches with score above "match_threshold".
        # Drop the dustbin ([:-1]) before taking row/column argmaxes.
        max0, max1 = scores[:, :-1, :-1].max(2), scores[:, :-1, :-1].max(1)
        indices0, indices1 = max0.indices, max1.indices
        # Mutual nearest-neighbor check in both directions.
        mutual0 = arange_like(indices0, 1)[None] == indices1.gather(1, indices0)
        mutual1 = arange_like(indices1, 1)[None] == indices0.gather(1, indices1)
        zero = scores.new_tensor(0)
        mscores0 = torch.where(mutual0, max0.values.exp(), zero)
        mscores1 = torch.where(mutual1, mscores0.gather(1, indices1), zero)
        valid0 = mutual0 & (mscores0 > self.config['match_threshold'])
        valid1 = mutual1 & valid0.gather(1, indices1)
        indices0 = torch.where(valid0, indices0, indices0.new_tensor(-1))
        indices1 = torch.where(valid1, indices1, indices1.new_tensor(-1))

        return {
            'matches0': indices0,  # use -1 for invalid match
            'matches1': indices1,  # use -1 for invalid match
            'matching_scores0': mscores0,
            'matching_scores1': mscores1,
        }
def load_network(net, model_dir, resume=True, epoch=-1, strict=True, force=False):
    """
    Load latest network-weights from dir or path.

    Returns the epoch to resume from (stored epoch + 1), or 0 when no
    checkpoint was loaded.
    """
    if not resume:
        return 0
    if not os.path.exists(model_dir):
        if force:
            raise NotImplementedError
        print('pretrained model does not exist')
        return 0

    # Resolve the checkpoint file: either pick one inside a directory or
    # use the given path directly.
    if os.path.isdir(model_dir):
        saved_epochs = [
            int(fname.split('.')[0])
            for fname in os.listdir(model_dir) if 'pth' in fname
        ]
        if not saved_epochs:
            return 0
        chosen = max(saved_epochs) if epoch == -1 else epoch
        model_path = os.path.join(model_dir, '{}.pth'.format(chosen))
    else:
        model_path = model_dir

    print('=> load weights: ', model_path)
    checkpoint = torch.load(model_path, torch.device("cpu"))
    # A full checkpoint stores the weights under 'net'; otherwise the file
    # is the state dict itself.
    state = checkpoint['net'] if 'net' in checkpoint.keys() else checkpoint
    net.load_state_dict(state, strict=strict)
    return checkpoint.get('epoch', 0) + 1
def vis_match_pairs(pred, feats0, feats1, name0, name1):
    """vis matches on two images

    Args:
        pred: matcher output; 'matches0' and 'matching_scores0' are read
            (batch dim 0, -1 marks unmatched keypoints).
        feats0, feats1: feature groups with a 'keypoints' dataset each.
        name0, name1: image file paths (used directly as paths below).

    NOTE(review): depends on cv2 and make_matching_plot_fast from the
    original module; colors encode match confidence via the jet colormap.
    """
    import matplotlib.cm as cm
    image0_path = name0
    image1_path = name1
    image0 = cv2.imread(image0_path)
    image0 = cv2.cvtColor(image0, cv2.COLOR_RGB2GRAY)
    image1 = cv2.imread(image1_path)
    image1 = cv2.cvtColor(image1, cv2.COLOR_RGB2GRAY)
    matches = pred['matches0'][0].detach().cpu().numpy()
    valid = matches > -1  # -1 means no match for that keypoint
    kpts0, kpts1 = feats0['keypoints'].__array__(), feats1['keypoints'].__array__()
    # Keep only mutually matched keypoints, reordered into correspondence.
    mkpts0, mkpts1 = kpts0[valid], kpts1[matches[valid]]
    conf = pred['matching_scores0'][0].detach().cpu().numpy()
    mconf = conf[valid]
    color = cm.jet(mconf)
    make_matching_plot_fast(
        image0, image1, kpts0, kpts1,
        mkpts0, mkpts1, color, text=[]
    )
The provided code snippet includes necessary dependencies for implementing the `spg` function. Write a Python function `def spg(cfg, feature_path, covis_pairs, matches_out, vis_match=False)` to solve the following problem:
Match features by SuperGlue
Here is the function:
def spg(cfg, feature_path, covis_pairs, matches_out, vis_match=False):
    """Match features by SuperGlue.

    Args:
        cfg: config with network.matching / network.matching_model_path.
        feature_path: h5 file with per-image keypoints/descriptors/scores.
        covis_pairs: text file with one 'name0 name1' pair per line.
        matches_out: output h5 file; one group per pair with matches0/1
            and matching_scores0/1 datasets.
        vis_match: if True, visualize each matched pair.
    """
    from src.models.matchers.SuperGlue.superglue import SuperGlue as spg_matcher
    from src.utils.model_io import load_network
    from src.utils.vis_utils import vis_match_pairs

    assert osp.exists(feature_path), feature_path
    feature_file = h5py.File(feature_path, 'r')
    logging.info(f'Exporting matches to {matches_out}')
    with open(covis_pairs, 'r') as f:
        pair_list = f.read().rstrip('\n').split('\n')

    # load superglue model
    conf = confs[cfg.network.matching]['conf']
    model = spg_matcher(conf).cuda()
    model.eval()
    load_network(model, cfg.network.matching_model_path, force=True)

    # match features by superglue
    match_file = h5py.File(matches_out, 'w')
    matched = set()
    # try/finally so both h5 files are closed even if matching fails
    # (original never closed feature_file at all).
    try:
        for pair in tqdm.tqdm(pair_list):
            name0, name1 = pair.split(' ')
            pair = names_to_pair(name0, name1)

            # Skip pairs already matched in either direction.
            if len({(name0, name1), (name1, name0)} & matched) \
                    or pair in match_file:
                continue

            data = {}
            feats0, feats1 = feature_file[name0], feature_file[name1]
            for k in feats0.keys():
                data[k + '0'] = feats0[k].__array__()
            for k in feats1.keys():
                data[k + '1'] = feats1[k].__array__()
            data = {k: torch.from_numpy(v)[None].float().cuda()
                    for k, v in data.items()}

            # SuperGlue only needs the image shapes, not the pixels.
            data['image0'] = torch.empty((1, 1, ) + tuple(feats0['image_size'])[::-1])
            data['image1'] = torch.empty((1, 1, ) + tuple(feats1['image_size'])[::-1])

            with torch.no_grad():  # inference only; avoid building a graph
                pred = model(data)

            grp = match_file.create_group(pair)
            matches0 = pred['matches0'][0].cpu().short().numpy()
            grp.create_dataset('matches0', data=matches0)
            matches1 = pred['matches1'][0].cpu().short().numpy()
            grp.create_dataset('matches1', data=matches1)
            if 'matching_scores0' in pred:
                scores = pred['matching_scores0'][0].cpu().half().numpy()
                grp.create_dataset('matching_scores0', data=scores)
            if 'matching_scores1' in pred:
                scores = pred['matching_scores1'][0].cpu().half().numpy()
                grp.create_dataset('matching_scores1', data=scores)

            matched |= {(name0, name1), (name1, name0)}

            if vis_match:
                vis_match_pairs(pred, feats0, feats1, name0, name1)
    finally:
        match_file.close()
        feature_file.close()
    logging.info('Finishing exporting matches.')
14,558 | import h5py
import json
import os.path as osp
import numpy as np
from collections import defaultdict
from pathlib import Path
from src.utils.colmap import read_write_model
from src.utils import path_utils
The provided code snippet includes necessary dependencies for implementing the `average_3d_ann` function. Write a Python function `def average_3d_ann(kp3d_id_feature, kp3d_id_score, xyzs, points_idxs, feature_dim)` to solve the following problem:
average position, descriptors and scores for 3d points new_point_feature = avg(all merged 3d points features) = avg(all matched 2d points features)
Here is the function:
def average_3d_ann(kp3d_id_feature, kp3d_id_score, xyzs, points_idxs, feature_dim):
    """
    Average position, descriptors and scores for 3d points.

    new_point_feature = avg(all merged 3d points features)
                      = avg(all matched 2d points features)

    Args:
        kp3d_id_feature: maps old 3d-point idx -> (k, feature_dim) descriptors.
        kp3d_id_score: maps old 3d-point idx -> scores (reshaped to (k, 1)).
        xyzs: maps new 3d-point idx -> xyz position (3 values).
        points_idxs: maps new 3d-point idx -> list of merged old point idxs.
        feature_dim: descriptor dimensionality (used for empty output shape).

    Returns:
        (positions (M, 3), descriptors (M, feature_dim), scores (M, 1)).
    """
    # Accumulate in Python lists and concatenate once at the end: the
    # original grew arrays with np.append per item, which is O(n^2).
    positions, avg_descriptors, avg_scores = [], [], []
    for new_point_idx, old_points_idxs in points_idxs.items():
        descriptors = np.concatenate(
            [kp3d_id_feature[old_idx] for old_idx in old_points_idxs], axis=0)
        scores = np.concatenate(
            [kp3d_id_score[old_idx].reshape(-1, 1) for old_idx in old_points_idxs],
            axis=0)
        positions.append(xyzs[new_point_idx].reshape(1, 3))
        avg_descriptors.append(np.mean(descriptors, axis=0).reshape(1, -1))
        avg_scores.append(np.mean(scores, axis=0).reshape(1, -1))

    if not positions:
        # Preserve the original empty-output shapes.
        return (np.empty(shape=(0, 3)),
                np.empty(shape=(0, feature_dim)),
                np.empty(shape=(0, 1)))

    kp3d_position = np.concatenate(positions, axis=0)
    kp3d_descriptors = np.concatenate(avg_descriptors, axis=0)
    kp3d_scores = np.concatenate(avg_scores, axis=0)
    return kp3d_position, kp3d_descriptors, kp3d_scores
14,559 | import h5py
import tqdm
import torch
import logging
from torch.utils.data import DataLoader
confs = {
'superpoint': {
'output': 'feats-spp',
'model': {
'name': 'spp_det',
},
'preprocessing': {
'grayscale': True,
'resize_h': 512,
'resize_w': 512
},
'conf': {
'descriptor_dim': 256,
'nms_radius': 3,
'max_keypoints': 4096,
'keypoints_threshold': 0.6
}
}
}
def load_network(net, model_dir, resume=True, epoch=-1, strict=True, force=False):
    """
    Load latest network-weights from dir or path.

    Args:
        net: module whose state dict is loaded in place.
        model_dir: checkpoint directory (picks '<epoch>.pth') or a file path.
        resume: when False, load nothing and return 0.
        epoch: specific epoch to load from a directory; -1 picks the latest.
        strict: passed through to load_state_dict.
        force: when True, a missing path raises instead of returning 0.

    Returns:
        The epoch to resume from (stored epoch + 1), or 0 when nothing loaded.
    """
    if not resume:
        return 0
    if not os.path.exists(model_dir):
        if force:
            raise NotImplementedError
        else:
            print('pretrained model does not exist')
            return 0
    if os.path.isdir(model_dir):
        # Checkpoint files are named '<epoch>.pth'; collect the epochs.
        pths = [int(pth.split('.')[0]) for pth in os.listdir(model_dir) if 'pth' in pth]
        if len(pths) == 0:
            return 0
        if epoch == -1:
            pth = max(pths)
        else:
            pth = epoch
        model_path = os.path.join(model_dir, '{}.pth'.format(pth))
    else:
        model_path = model_dir
    print('=> load weights: ', model_path)
    # Always load to CPU; caller moves the module to its device.
    pretrained_model = torch.load(model_path, torch.device("cpu"))
    # Full checkpoints store weights under 'net'; otherwise the file is
    # the state dict itself.
    if 'net' in pretrained_model.keys():
        net.load_state_dict(pretrained_model['net'], strict=strict)
    else:
        net.load_state_dict(pretrained_model, strict=strict)
    return pretrained_model.get('epoch', 0) + 1
class SuperPoint(nn.Module):
    """SuperPoint Convolutional Detector and Descriptor

    SuperPoint: Self-Supervised Interest Point Detection and
    Description. Daniel DeTone, Tomasz Malisiewicz, and Andrew
    Rabinovich. In CVPRW, 2019. https://arxiv.org/abs/1712.07629

    NOTE(review): simple_nms, remove_borders, top_k_keypoints and
    sample_descriptors are defined elsewhere in the original module.
    """
    default_config = {
        'descriptor_dim': 256,
        'nms_radius': 4,
        'keypoint_threshold': 0.005,
        'max_keypoints': -1,  # -1 keeps all keypoints above threshold
        'remove_borders': 4,
    }

    def __init__(self, config):
        super().__init__()
        self.config = {**self.default_config, **config}

        self.relu = nn.ReLU(inplace=True)
        self.pool = nn.MaxPool2d(kernel_size=2, stride=2)
        c1, c2, c3, c4, c5 = 64, 64, 128, 128, 256

        # Shared VGG-style encoder (3 max-pools -> stride-8 feature map).
        self.conv1a = nn.Conv2d(1, c1, kernel_size=3, stride=1, padding=1)
        self.conv1b = nn.Conv2d(c1, c1, kernel_size=3, stride=1, padding=1)
        self.conv2a = nn.Conv2d(c1, c2, kernel_size=3, stride=1, padding=1)
        self.conv2b = nn.Conv2d(c2, c2, kernel_size=3, stride=1, padding=1)
        self.conv3a = nn.Conv2d(c2, c3, kernel_size=3, stride=1, padding=1)
        self.conv3b = nn.Conv2d(c3, c3, kernel_size=3, stride=1, padding=1)
        self.conv4a = nn.Conv2d(c3, c4, kernel_size=3, stride=1, padding=1)
        self.conv4b = nn.Conv2d(c4, c4, kernel_size=3, stride=1, padding=1)

        # Detector head: 65 channels = 8x8 cell positions + 1 "no keypoint" bin.
        self.convPa = nn.Conv2d(c4, c5, kernel_size=3, stride=1, padding=1)
        self.convPb = nn.Conv2d(c5, 65, kernel_size=1, stride=1, padding=0)

        # Descriptor head.
        self.convDa = nn.Conv2d(c4, c5, kernel_size=3, stride=1, padding=1)
        self.convDb = nn.Conv2d(
            c5, self.config['descriptor_dim'],
            kernel_size=1, stride=1, padding=0)

        mk = self.config['max_keypoints']
        if mk == 0 or mk < -1:
            raise ValueError('\"max_keypoints\" must be positive or \"-1\"')

    def forward(self, inp):
        """ Compute keypoints, scores, descriptors for image """
        # Shared Encoder
        x = self.relu(self.conv1a(inp))
        x = self.relu(self.conv1b(x))
        x = self.pool(x)
        x = self.relu(self.conv2a(x))
        x = self.relu(self.conv2b(x))
        x = self.pool(x)
        x = self.relu(self.conv3a(x))
        x = self.relu(self.conv3b(x))
        x = self.pool(x)
        x = self.relu(self.conv4a(x))
        x = self.relu(self.conv4b(x))

        # Compute the dense keypoint scores
        cPa = self.relu(self.convPa(x))
        scores = self.convPb(cPa)
        # Softmax over 65 bins, then drop the "no keypoint" dustbin.
        scores = torch.nn.functional.softmax(scores, 1)[:, :-1]
        b, _, h, w = scores.shape
        # Unfold the 64 cell bins back into full-resolution (h*8, w*8) scores.
        scores = scores.permute(0, 2, 3, 1).reshape(b, h, w, 8, 8)
        scores = scores.permute(0, 1, 3, 2, 4).reshape(b, h*8, w*8)
        scores = simple_nms(scores, self.config['nms_radius'])

        # Extract keypoints
        keypoints = [
            torch.nonzero(s > self.config['keypoint_threshold'])
            for s in scores]
        scores = [s[tuple(k.t())] for s, k in zip(scores, keypoints)]

        # Discard keypoints near the image borders
        keypoints, scores = list(zip(*[
            remove_borders(k, s, self.config['remove_borders'], h*8, w*8)
            for k, s in zip(keypoints, scores)]))

        # Keep the k keypoints with highest score
        if self.config['max_keypoints'] >= 0:
            keypoints, scores = list(zip(*[
                top_k_keypoints(k, s, self.config['max_keypoints'])
                for k, s in zip(keypoints, scores)]))

        # Convert (h, w) to (x, y)
        keypoints = [torch.flip(k, [1]).float() for k in keypoints]

        # Compute the dense descriptors
        cDa = self.relu(self.convDa(x))
        descriptors = self.convDb(cDa)
        descriptors = torch.nn.functional.normalize(descriptors, p=2, dim=1)

        # Extract descriptors (bilinear sampling at keypoint locations).
        descriptors = [sample_descriptors(k[None], d[None], 8)[0]
                       for k, d in zip(keypoints, descriptors)]

        return {
            'keypoints': keypoints,
            'scores': scores,
            'descriptors': descriptors,
        }
class NormalizedDataset(Dataset):
    """read images (suppose images have been cropped)

    Yields dicts with 'path', 'image' (float32 CHW in [0, 1]) and
    'size' (original H, W). NOTE(review): depends on cv2, numpy and
    SimpleNamespace imported elsewhere in the original module.
    """
    default_conf = {
        'globs': ['*.jpg', '*.png'],
        'grayscale': True,
    }

    def __init__(self, img_lists, conf):
        self.img_lists = img_lists
        # Merge user conf over defaults and expose keys as attributes.
        self.conf = SimpleNamespace(**{**self.default_conf, **conf})

        if len(img_lists) == 0:
            raise ValueError('Could not find any image.')

    def __getitem__(self, index):
        img_path = self.img_lists[index]
        mode = cv2.IMREAD_GRAYSCALE if self.conf.grayscale else cv2.IMREAD_COLOR
        image = cv2.imread(img_path, mode)
        size = image.shape[:2]
        image = image.astype(np.float32)

        if self.conf.grayscale:
            image = image[None]  # add channel dim: (1, H, W)
        else:
            image = image.transpose((2, 0, 1))  # HWC -> CHW
        image /= 255.

        data = {
            'path': str(img_path),
            'image': image,
            'size': np.array(size),
        }
        return data

    def __len__(self):
        return len(self.img_lists)
The provided code snippet includes necessary dependencies for implementing the `spp` function. Write a Python function `def spp(img_lists, feature_out, cfg)` to solve the following problem:
extract keypoints info by superpoint
Here is the function:
def spp(img_lists, feature_out, cfg):
    """Extract keypoints info by SuperPoint and write them to an h5 file.

    Args:
        img_lists: list of image file paths.
        feature_out: output h5 file; one group per image path holding
            'keypoints', 'scores', 'descriptors' and 'image_size'.
        cfg: config with network.detection / network.detection_model_path.
    """
    from src.utils.model_io import load_network
    from src.models.extractors.SuperPoint.superpoint import SuperPoint as spp_det
    from src.datasets.normalized_dataset import NormalizedDataset

    conf = confs[cfg.network.detection]
    model = spp_det(conf['conf']).cuda()
    model.eval()
    load_network(model, cfg.network.detection_model_path, force=True)

    dataset = NormalizedDataset(img_lists, conf['preprocessing'])
    loader = DataLoader(dataset, num_workers=1)

    feature_file = h5py.File(feature_out, 'w')
    logging.info(f'Exporting features to {feature_out}')
    # try/finally so the h5 file is closed even if extraction fails
    # (original leaked it on error).
    try:
        for data in tqdm.tqdm(loader):
            inp = data['image'].cuda()
            with torch.no_grad():  # inference only; avoid building a graph
                pred = model(inp)
            pred = {k: v[0].cpu().numpy() for k, v in pred.items()}
            pred['image_size'] = data['size'][0].numpy()

            grp = feature_file.create_group(data['path'][0])
            for k, v in pred.items():
                grp.create_dataset(k, data=v)
            del pred
    finally:
        feature_file.close()
    logging.info('Finishing exporting features.')
14,560 | import numpy as np
import torch
def compute_epipolar_error(kpts0, kpts1, T_0to1, K0, K1):
    """Squared symmetric epipolar distance for corresponding keypoints.

    Args:
        kpts0, kpts1: (N, 2) pixel coordinates in image 0 / image 1.
        T_0to1: 4x4 (or 3x4) relative transform from camera 0 to camera 1.
        K0, K1: 3x3 intrinsic matrices.

    Returns:
        (N,) array of squared epipolar distances.
    """
    def to_homogeneous(points):
        return np.concatenate([points, np.ones_like(points[:, :1])], axis=-1)

    # Move pixels to normalized camera coordinates: subtract the principal
    # point (K[0,2], K[1,2]) and divide by the focal lengths (K[0,0], K[1,1]).
    kpts0 = (kpts0 - K0[[0, 1], [2, 2]][None]) / K0[[0, 1], [0, 1]][None]
    kpts1 = (kpts1 - K1[[0, 1], [2, 2]][None]) / K1[[0, 1], [0, 1]][None]
    kpts0 = to_homogeneous(kpts0)
    kpts1 = to_homogeneous(kpts1)

    # Essential matrix E = [t]_x R from the relative pose.
    t0, t1, t2 = T_0to1[:3, 3]
    t_skew = np.array([
        [0, -t2, t1],
        [t2, 0, -t0],
        [-t1, t0, 0]
    ])
    E = t_skew @ T_0to1[:3, :3]

    Ep0 = kpts0 @ E.T  # N x 3
    p1Ep0 = np.sum(kpts1 * Ep0, -1)  # N
    Etp1 = kpts1 @ E  # N x 3
    # Sampson-style symmetric distance: residual squared, weighted by the
    # inverse squared norms of both epipolar lines.
    d = p1Ep0 ** 2 * (1.0 / (Ep0[:, 0] ** 2 + Ep0[:, 1] ** 2)
                      + 1.0 / (Etp1[:, 0] ** 2 + Etp1[:, 1] ** 2))
    return d
14,561 | import numpy as np
import torch
The provided code snippet includes necessary dependencies for implementing the `project` function. Write a Python function `def project(xyz, K, RT, need_depth=False)` to solve the following problem:
xyz: [N, 3] K: [3, 3] RT: [3, 4]
Here is the function:
def project(xyz, K, RT, need_depth=False):
    """Project 3D points into the image plane.

    Args:
        xyz: [N, 3] world points.
        K: [3, 3] camera intrinsics.
        RT: [3, 4] extrinsics (rotation | translation).
        need_depth: if True, also return the per-point camera-space depth.

    Returns:
        xy [N, 2] pixel coordinates, plus depth [N] when need_depth.
    """
    # Transform into camera coordinates (single expression, no in-place +=).
    xyz = np.dot(xyz, RT[:, :3].T) + RT[:, 3:].T
    depth = xyz[:, 2:].flatten()
    # Apply intrinsics and perspective-divide by z.
    xyz = np.dot(xyz, K.T)
    xy = xyz[:, :2] / xyz[:, 2:]
    if need_depth:
        return xy, depth
    else:
        return xy
14,562 | import numpy as np
import torch
def AngleAxisRotatePoint(angleAxis, pt):
    """Rotate each point by the corresponding angle-axis vector (Rodrigues).

    Args:
        angleAxis: (N, 3) rotation vectors; direction = axis, norm = angle.
        pt: (N, 3) points to rotate.

    Returns:
        (N, 3) rotated points. For near-zero rotations the first-order
        approximation p + w x p is used to avoid dividing by ~0.
    """
    theta2 = (angleAxis * angleAxis).sum(dim=1)
    # mask == 1 where the angle is non-zero (full Rodrigues formula applies).
    mask = (theta2 > 0).float()
    # Add (1 - mask) so sqrt never sees exactly 0 (keeps gradients finite).
    theta = torch.sqrt(theta2 + (1 - mask))

    mask = mask.reshape((mask.shape[0], 1))
    mask = torch.cat([mask, mask, mask], dim=1)

    costheta = torch.cos(theta)
    sintheta = torch.sin(theta)
    thetaInverse = 1.0 / theta

    # Unit rotation axis components.
    w0 = angleAxis[:, 0] * thetaInverse
    w1 = angleAxis[:, 1] * thetaInverse
    w2 = angleAxis[:, 2] * thetaInverse

    # w x p (cross product with the unit axis).
    wCrossPt0 = w1 * pt[:, 2] - w2 * pt[:, 1]
    wCrossPt1 = w2 * pt[:, 0] - w0 * pt[:, 2]
    wCrossPt2 = w0 * pt[:, 1] - w1 * pt[:, 0]

    tmp = (w0 * pt[:, 0] + w1 * pt[:, 1] + w2 * pt[:, 2]) * (1.0 - costheta)

    # Rodrigues: p cos(t) + (w x p) sin(t) + w (w . p) (1 - cos(t)).
    r0 = pt[:, 0] * costheta + wCrossPt0 * sintheta + w0 * tmp
    r1 = pt[:, 1] * costheta + wCrossPt1 * sintheta + w1 * tmp
    r2 = pt[:, 2] * costheta + wCrossPt2 * sintheta + w2 * tmp

    r0 = r0.reshape((r0.shape[0], 1))
    r1 = r1.reshape((r1.shape[0], 1))
    r2 = r2.reshape((r2.shape[0], 1))

    res1 = torch.cat([r0, r1, r2], dim=1)

    # Small-angle branch: p + (angleAxis x p), first-order approximation.
    wCrossPt0 = angleAxis[:, 1] * pt[:, 2] - angleAxis[:, 2] * pt[:, 1]
    wCrossPt1 = angleAxis[:, 2] * pt[:, 0] - angleAxis[:, 0] * pt[:, 2]
    wCrossPt2 = angleAxis[:, 0] * pt[:, 1] - angleAxis[:, 1] * pt[:, 0]

    r00 = pt[:, 0] + wCrossPt0
    r01 = pt[:, 1] + wCrossPt1
    r02 = pt[:, 2] + wCrossPt2

    r00 = r00.reshape((r00.shape[0], 1))
    r01 = r01.reshape((r01.shape[0], 1))
    r02 = r02.reshape((r02.shape[0], 1))

    res2 = torch.cat([r00, r01, r02], dim=1)

    # Select per row: full formula where theta > 0, approximation otherwise.
    return res1 * mask + res2 * (1 - mask)
def SnavelyReprojectionErrorV2(points_ob, cameras_ob, features):
    """Per-observation reprojection residuals (Snavely bundle-adjustment model).

    Args:
        points_ob: (N, 3) or (N, 1, 3) 3D points.
        cameras_ob: matching camera params; [:3] angle-axis rotation,
            [3:6] translation.
        features: (N, >=5) rows of [obs_x, obs_y, focal, cx-offset, cy-offset].

    Returns:
        (N, 2) tensor of (x, y) residuals: predicted - observed.
    """
    if (len(points_ob.shape) == 3):
        # Squeeze an optional singleton middle dim.
        points_ob = points_ob[:, 0, :]
        cameras_ob = cameras_ob[:, 0, :]

    focals = features[:, 2]
    l1 = features[:, 3]
    l2 = features[:, 4]

    # camera[0,1,2] are the angle-axis rotation.
    p = AngleAxisRotatePoint(cameras_ob[:, :3], points_ob)
    p = p + cameras_ob[:, 3:6]

    # Perspective division to normalized image coordinates.
    xp = p[:, 0] / p[:, 2]
    yp = p[:, 1] / p[:, 2]
    # predicted_x, predicted_y = DistortV2(xp, yp, cameras_ob, cam_K)
    predicted_x = focals * xp + l1
    predicted_y = focals * yp + l2

    residual_0 = predicted_x - features[:, 0]
    residual_1 = predicted_y - features[:, 1]
    residual_0 = residual_0.reshape((residual_0.shape[0], 1))
    residual_1 = residual_1.reshape((residual_1.shape[0], 1))
    # return torch.sqrt(residual_0**2 + residual_1 ** 2)
    return torch.cat([residual_0, residual_1], dim=1)
14,563 | import numpy as np
import torch
def put_text(img, inform_text, color=None):
    """Draw inform_text onto img at a fixed top-left position.

    Args:
        img: BGR/grayscale image array (modified and returned).
        inform_text: string to render.
        color: BGR tuple; defaults to (255, 0, 0).
    """
    import cv2
    fontScale = 1
    if color is None:
        color = (255, 0, 0)
    org = (50, 50)  # fixed anchor near the top-left corner
    font = cv2.FONT_HERSHEY_SIMPLEX
    thickness = 2
    img = cv2.putText(img, inform_text, org, font,
                      fontScale, color, thickness, cv2.LINE_AA)
    return img
14,564 | import numpy as np
import torch
def draw_kpt2d(image, kpt2d, color=(0, 0, 255), radius=2, thikness=1):
    """Draw 2D keypoints as circles onto an image (in place) and return it.

    Args:
        image: image array to draw on.
        kpt2d: iterable of (x, y) coordinates.
        color: BGR circle color.
        radius, thikness: circle geometry (parameter name kept for
            backward compatibility with existing callers).
    """
    import cv2
    for coord in kpt2d:
        cv2.circle(image, (int(coord[0]), int(coord[1])), radius, color, thikness, 1)
        # cv2.circle(image, (int(coord[0]), int(coord[1])), 7, color, 1, 1)
    return image
14,565 | from copy import deepcopy
import torch
import torch.nn as nn
import torch.nn.functional as F
from .GATs import GraphAttentionLayer
def arange_like(x, dim: int):
    """Return [0, 1, ..., x.shape[dim]-1] with x's dtype and device."""
    ones = x.new_ones(x.shape[dim])
    return torch.cumsum(ones, 0) - 1
14,566 | from copy import deepcopy
import torch
import torch.nn as nn
import torch.nn.functional as F
from .GATs import GraphAttentionLayer
def buildAdjMatrix(num_2d, num_3d):
    """Build a (num_3d, num_2d) adjacency matrix on the GPU.

    Each 3D row is linked to its `num_leaf = num_2d // num_3d` consecutive
    2D columns with uniform weight 1/num_leaf.
    """
    num_leaf = int(num_2d / num_3d)
    weight = 1 / num_leaf
    adj = torch.zeros(num_3d, num_2d)
    for row in range(num_3d):
        adj[row, row * num_leaf:(row + 1) * num_leaf] = weight
    return adj.cuda()
14,567 | from copy import deepcopy
import torch
import torch.nn as nn
import torch.nn.functional as F
from .GATs import GraphAttentionLayer
def linear_attention(query, key, value):
    """Linearized attention via the positive feature map phi(x) = elu(x) + 1.

    query/key/value are (batch, dim, heads, n) tensors; returns a contiguous
    tensor of the same layout. Runs in O(n) instead of O(n^2).
    """
    eps = 1e-6
    q = F.elu(query) + 1
    k = F.elu(key) + 1
    n = value.size(3)
    # Pre-scale values for numerical stability; undone by `* n` at the end.
    v = value / n
    kv = torch.einsum('bdhm,bqhm->bqdh', k, v)
    z = 1 / (torch.einsum('bdhm,bdh->bhm', q, k.sum(3)) + eps)
    attended = torch.einsum('bdhm,bqdh,bhm->bqhm', q, kv, z) * n
    return attended.contiguous()
14,568 | from copy import deepcopy
import torch
import torch.nn as nn
import torch.nn.functional as F
from .GATs import GraphAttentionLayer
The provided code snippet includes necessary dependencies for implementing the `MLP` function. Write a Python function `def MLP(channels: list, do_bn=True)` to solve the following problem:
Multi-layer perceptron
Here is the function:
def MLP(channels: list, do_bn=True):
    """Multi-layer perceptron built from 1x1 Conv1d layers.

    Every hidden layer is followed by InstanceNorm1d (when do_bn) and ReLU;
    the final layer is a bare Conv1d.
    """
    layers = []
    last = len(channels) - 1
    for idx, (c_in, c_out) in enumerate(zip(channels[:-1], channels[1:]), 1):
        layers.append(nn.Conv1d(c_in, c_out, kernel_size=1, bias=True))
        if idx < last:  # no norm/activation after the output layer
            if do_bn:
                layers.append(nn.InstanceNorm1d(c_out))
            layers.append(nn.ReLU())
    return nn.Sequential(*layers)
14,569 | from pathlib import Path
import torch
from torch import nn
The provided code snippet includes necessary dependencies for implementing the `simple_nms` function. Write a Python function `def simple_nms(scores, nms_radius: int)` to solve the following problem:
Fast Non-maximum suppression to remove nearby points
Here is the function:
def simple_nms(scores, nms_radius: int):
    """Fast non-maximum suppression: zero out scores that are not the local
    maximum within a (2*nms_radius+1) square window."""
    assert nms_radius >= 0

    def pool(t):
        return torch.nn.functional.max_pool2d(
            t, kernel_size=nms_radius * 2 + 1, stride=1, padding=nms_radius)

    zeros = torch.zeros_like(scores)
    max_mask = scores == pool(scores)
    # Two refinement rounds: suppress neighbourhoods of current maxima,
    # then re-detect maxima among the survivors.
    for _ in range(2):
        suppressed = pool(max_mask.float()) > 0
        survivor_scores = torch.where(suppressed, zeros, scores)
        new_max = survivor_scores == pool(survivor_scores)
        max_mask = max_mask | (new_max & ~suppressed)
    return torch.where(max_mask, scores, zeros)
14,570 | from pathlib import Path
import torch
from torch import nn
The provided code snippet includes necessary dependencies for implementing the `remove_borders` function. Write a Python function `def remove_borders(keypoints, scores, border: int, height: int, width: int)` to solve the following problem:
Removes keypoints too close to the border
Here is the function:
def remove_borders(keypoints, scores, border: int, height: int, width: int):
    """Drop keypoints closer than `border` pixels to any image edge.

    keypoints are (y, x) rows; returns the filtered keypoints and scores.
    """
    ys = keypoints[:, 0]
    xs = keypoints[:, 1]
    keep = ((ys >= border) & (ys < height - border)
            & (xs >= border) & (xs < width - border))
    return keypoints[keep], scores[keep]
14,571 | from pathlib import Path
import torch
from torch import nn
def top_k_keypoints(keypoints, scores, k: int):
    """Keep the k highest-scoring keypoints (everything if k >= count)."""
    if k >= len(keypoints):
        return keypoints, scores
    top_scores, top_idx = scores.topk(k, dim=0)
    return keypoints[top_idx], top_scores
14,572 | from pathlib import Path
import torch
from torch import nn
The provided code snippet includes necessary dependencies for implementing the `sample_descriptors` function. Write a Python function `def sample_descriptors(keypoints, descriptors, s: int = 8)` to solve the following problem:
Interpolate descriptors at keypoint locations
Here is the function:
def sample_descriptors(keypoints, descriptors, s: int = 8):
    """ Interpolate descriptors at keypoint locations.

    keypoints:   (b, n, 2) pixel coordinates in the full-resolution image.
    descriptors: (b, c, h, w) dense descriptor map at stride `s`.
    Returns L2-normalized descriptors of shape (b, c, n).
    """
    b, c, h, w = descriptors.shape
    # Map pixel coordinates onto the descriptor grid and normalize to (-1, 1).
    keypoints = keypoints - s / 2 + 0.5
    keypoints /= torch.tensor([(w*s - s/2 - 0.5), (h*s - s/2 - 0.5)],
                              ).to(keypoints)[None]
    keypoints = keypoints*2 - 1  # normalize to (-1, 1)
    # grid_sample's align_corners default changed in torch 1.3, so it must be
    # passed explicitly on 1.3+. BUGFIX: the original check
    # `int(torch.__version__[2]) > 2` read a single character and misparsed
    # versions such as "1.10.x" and every "2.x" release.
    major, minor = (int(v) for v in torch.__version__.split('.')[:2])
    args = {'align_corners': True} if (major, minor) >= (1, 3) else {}
    descriptors = torch.nn.functional.grid_sample(
        descriptors, keypoints.view(b, 1, -1, 2), mode='bilinear', **args)
    descriptors = torch.nn.functional.normalize(
        descriptors.reshape(b, c, -1), p=2, dim=1)
    return descriptors
14,573 | import torch
import torch.nn as nn
def find_nn(sim, ratio_thresh, distance_thresh):
    """Nearest-neighbour matching from a similarity matrix.

    Applies Lowe's ratio test (when ratio_thresh) and an absolute distance
    test (when distance_thresh), both in squared-distance space where
    dist^2 = 2 * (1 - sim). Returns per-row match indices (-1 = rejected)
    and confidence scores mapped to [0, 1].
    """
    k = 2 if ratio_thresh else 1
    sim_nn, ind_nn = sim.topk(k, dim=-1, largest=True)
    dist_nn = 2 * (1 - sim_nn)
    keep = torch.ones(ind_nn.shape[:-1], dtype=torch.bool, device=sim.device)
    if ratio_thresh:
        keep = keep & (dist_nn[..., 0] <= (ratio_thresh ** 2) * dist_nn[..., 1])
    if distance_thresh:
        keep = keep & (dist_nn[..., 0] <= distance_thresh ** 2)
    matches = torch.where(keep, ind_nn[..., 0], ind_nn.new_tensor(-1))
    scores = torch.where(keep, (sim_nn[..., 0] + 1) / 2, sim_nn.new_tensor(0))
    return matches, scores
14,574 | import torch
import torch.nn as nn
def mutual_check(m0, m1):
    """Keep only matches in m0 that m1 maps back to the same index.

    Entries failing the round-trip test (or already -1) become -1.
    """
    idx = torch.arange(m0.shape[-1], device=m0.device)
    # Gather m1 at m0's targets; unmatched (-1) slots use dummy index 0,
    # which is later masked out by the (m0 > -1) condition.
    back = torch.gather(m1, -1, torch.where(m0 > -1, m0, m0.new_tensor(0)))
    mutual = (m0 > -1) & (back == idx)
    return torch.where(mutual, m0, m0.new_tensor(-1))
14,575 | from copy import deepcopy
from pathlib import Path
import torch
from torch import nn
The provided code snippet includes necessary dependencies for implementing the `MLP` function. Write a Python function `def MLP(channels: list, do_bn=True)` to solve the following problem:
Multi-layer perceptron
Here is the function:
def MLP(channels: list, do_bn=True):
    """Multi-layer perceptron of 1x1 Conv1d layers.

    Hidden layers get BatchNorm1d (when do_bn) and ReLU; the output layer
    is a bare Conv1d.
    """
    layers = []
    last = len(channels) - 1
    for idx, (c_in, c_out) in enumerate(zip(channels[:-1], channels[1:]), 1):
        layers.append(nn.Conv1d(c_in, c_out, kernel_size=1, bias=True))
        if idx < last:  # skip norm/activation on the output layer
            if do_bn:
                layers.append(nn.BatchNorm1d(c_out))
            layers.append(nn.ReLU())
    return nn.Sequential(*layers)
14,576 | from copy import deepcopy
from pathlib import Path
import torch
from torch import nn
The provided code snippet includes necessary dependencies for implementing the `normalize_keypoints` function. Write a Python function `def normalize_keypoints(kpts, image_shape)` to solve the following problem:
Normalize keypoints locations based on image image_shape
Here is the function:
def normalize_keypoints(kpts, image_shape):
    """Center keypoints on the image and scale by 0.7 * max(width, height).

    kpts: (b, n, 2) pixel coordinates; image_shape: (b, c, height, width).
    """
    _, _, height, width = image_shape
    wh = torch.stack([kpts.new_tensor(1) * width,
                      kpts.new_tensor(1) * height])[None]
    center = wh / 2
    scale = wh.max(1, keepdim=True).values * 0.7
    return (kpts - center[:, None, :]) / scale[:, None, :]
14,577 | from copy import deepcopy
from pathlib import Path
import torch
from torch import nn
def attention(query, key, value):
    """Scaled dot-product attention over (b, d, h, n)-layout tensors.

    Returns (attended values, attention weights).
    """
    d = query.shape[1]
    logits = torch.einsum('bdhn,bdhm->bhnm', query, key) / d ** .5
    weights = torch.nn.functional.softmax(logits, dim=-1)
    attended = torch.einsum('bhnm,bdhm->bdhn', weights, value)
    return attended, weights
14,578 | from copy import deepcopy
from pathlib import Path
import torch
from torch import nn
def log_sinkhorn_iterations(Z, log_mu, log_nu, iters: int):
    """Sinkhorn normalization performed entirely in log-space for stability.

    Z: (b, m, n) log-cost matrix; log_mu/log_nu: (b, m)/(b, n) log marginals.
    """
    u = torch.zeros_like(log_mu)
    v = torch.zeros_like(log_nu)
    for _ in range(iters):
        # Alternate row- and column-marginal corrections.
        u = log_mu - torch.logsumexp(Z + v.unsqueeze(1), dim=2)
        v = log_nu - torch.logsumexp(Z + u.unsqueeze(2), dim=1)
    return Z + u.unsqueeze(2) + v.unsqueeze(1)
The provided code snippet includes necessary dependencies for implementing the `log_optimal_transport` function. Write a Python function `def log_optimal_transport(scores, alpha, iters: int)` to solve the following problem:
Perform Differentiable Optimal Transport in Log-space for stability
Here is the function:
def log_optimal_transport(scores, alpha, iters: int):
    """Differentiable optimal transport in log-space (SuperGlue style).

    Augments `scores` (b, m, n) with dustbin rows/columns scored by the
    learned scalar `alpha`, runs Sinkhorn, and rescales so probabilities
    are multiplied by m + n.
    """
    b, m, n = scores.shape
    one = scores.new_tensor(1)
    ms = (m * one).to(scores)
    ns = (n * one).to(scores)
    # Dustbin-augmented coupling matrix of shape (b, m+1, n+1).
    bins0 = alpha.expand(b, m, 1)
    bins1 = alpha.expand(b, 1, n)
    couplings = torch.cat(
        [torch.cat([scores, bins0], -1),
         torch.cat([bins1, alpha.expand(b, 1, 1)], -1)], 1)
    norm = -(ms + ns).log()
    log_mu = torch.cat([norm.expand(m), ns.log()[None] + norm])[None].expand(b, -1)
    log_nu = torch.cat([norm.expand(n), ms.log()[None] + norm])[None].expand(b, -1)
    Z = log_sinkhorn_iterations(couplings, log_mu, log_nu, iters)
    return Z - norm  # multiply probabilities by M+N
14,579 | from copy import deepcopy
from pathlib import Path
import torch
from torch import nn
def arange_like(x, dim: int):
    """[0..x.shape[dim]-1] as a tensor with x's dtype/device (traceable in 1.1)."""
    return torch.cumsum(x.new_ones(x.shape[dim]), 0) - 1
14,580 | import cv2
import torch
import numpy as np
import os.path as osp
from loguru import logger
from pathlib import Path
def pad_keypoints2d_random(keypoints, features, scores, img_h, img_w, n_target_kpts):
    """Pad (with random non-duplicate keypoints) or truncate to n_target_kpts.

    keypoints: (k, 2) rows of (y, x); features: (dim, k); scores: (k, 1).
    Padded features are ones and padded scores zeros.
    """
    dtype = keypoints.dtype
    n_pad = n_target_kpts - keypoints.shape[0]
    if n_pad < 0:
        # Too many: keep the first n_target_kpts of each tensor.
        return (keypoints[:n_target_kpts], features[:, :n_target_kpts],
                scores[:n_target_kpts])
    while n_pad > 0:
        rand_x = torch.randint(0, img_w, (n_pad,), dtype=dtype)
        rand_y = torch.randint(0, img_h, (n_pad,), dtype=dtype)
        candidates = torch.stack([rand_y, rand_x], dim=1)
        # Reject candidates colliding with an existing keypoint.
        dup = (candidates[:, None, :] == keypoints[None, :, :]).all(-1).any(1)
        fresh = candidates[~dup]
        n_pad -= len(fresh)
        if len(fresh) > 0:
            keypoints = torch.cat([keypoints, fresh], 0)
            scores = torch.cat(
                [scores, torch.zeros(len(fresh), 1, dtype=scores.dtype)], dim=0)
            features = torch.cat(
                [features, torch.ones(features.shape[0], len(fresh))], dim=1)
    return keypoints, features, scores
14,581 | import cv2
import torch
import numpy as np
import os.path as osp
from loguru import logger
from pathlib import Path
def pad_features(features, num_leaf):
    """Pad (rows of ones) or truncate to num_leaf rows, then transpose.

    features: (k, dim); returns (dim, num_leaf).
    """
    count, dim = features.shape[0], features.shape[1]
    if count >= num_leaf:
        features = features[:num_leaf]
    else:
        pad = torch.ones((num_leaf - count, dim))
        features = torch.cat([features, pad], dim=0)
    return features.T
14,582 | import cv2
import torch
import numpy as np
import os.path as osp
from loguru import logger
from pathlib import Path
def pad_scores(scores, num_leaf):
    """Pad with zeros or truncate `scores` (k, 1) to exactly num_leaf rows."""
    count = scores.shape[0]
    if count >= num_leaf:
        return scores[:num_leaf]
    return torch.cat([scores, torch.zeros((num_leaf - count, 1))], dim=0)
14,583 | import cv2
import torch
import numpy as np
import os.path as osp
from loguru import logger
from pathlib import Path
def avg_features(features):
    """Average feature rows into a single (dim, 1) column vector."""
    return features.mean(dim=0).reshape(-1, 1)
14,584 | import cv2
import torch
import numpy as np
import os.path as osp
from loguru import logger
from pathlib import Path
def avg_scores(scores):
    """Average score rows into a single (1, 1)-style column vector."""
    return scores.mean(dim=0).reshape(-1, 1)
14,585 | import cv2
import torch
import numpy as np
import os.path as osp
from loguru import logger
from pathlib import Path
The provided code snippet includes necessary dependencies for implementing the `pad_keypoints3d_random` function. Write a Python function `def pad_keypoints3d_random(keypoints, n_target_kpts)` to solve the following problem:
Pad or truncate orig 3d keypoints to fixed size.
Here is the function:
def pad_keypoints3d_random(keypoints, n_target_kpts):
    """Pad or truncate 3D keypoints (k, 3) to exactly n_target_kpts rows.

    Padding uses zero-mean uniform samples in [-0.5, 0.5)^3, rejecting any
    candidate that duplicates an existing keypoint.
    """
    n_pad = n_target_kpts - keypoints.shape[0]
    if n_pad < 0:
        return keypoints[:n_target_kpts]
    while n_pad > 0:
        rx = torch.rand(n_pad, 1) - 0.5
        ry = torch.rand(n_pad, 1) - 0.5
        rz = torch.rand(n_pad, 1) - 0.5
        candidates = torch.cat([rx, ry, rz], dim=1)  # (n_pad, 3)
        dup = (candidates[:, None, :] == keypoints[None, :, :]).all(-1).any(1)
        fresh = candidates[~dup]
        n_pad -= len(fresh)
        if len(fresh) > 0:
            keypoints = torch.cat([keypoints, fresh], dim=0)
    return keypoints
14,586 | import cv2
import torch
import numpy as np
import os.path as osp
from loguru import logger
from pathlib import Path
The provided code snippet includes necessary dependencies for implementing the `reshape_assign_matrix` function. Write a Python function `def reshape_assign_matrix(assign_matrix, orig_shape2d, orig_shape3d, shape2d, shape3d, pad=True, pad_val=0)` to solve the following problem:
Reshape assign matrix (from 2xk to nxm)
Here is the function:
def reshape_assign_matrix(assign_matrix, orig_shape2d, orig_shape3d,
                          shape2d, shape3d, pad=True, pad_val=0):
    """ Reshape assign matrix (from 2xk to nxm)

    assign_matrix: (2, k) matched index pairs — row 0 holds 2D indices,
                   row 1 holds 3D indices.
    Returns a dense int16 matrix with 1 at every match:
    (shape2d, shape3d) when pad=True (padded rows/columns set to pad_val),
    otherwise (orig_shape2d, orig_shape3d).
    """
    assign_matrix = assign_matrix.long()
    # Drop matches whose indices fall outside the target matrix.
    valid = (assign_matrix[0] < shape2d) & (assign_matrix[1] < shape3d)
    assign_matrix = assign_matrix[:, valid]
    if pad:
        conf_matrix = torch.zeros(shape2d, shape3d, dtype=torch.int16)
        conf_matrix[assign_matrix[0], assign_matrix[1]] = 1
        # Mark the padded region explicitly.
        conf_matrix[orig_shape2d:] = pad_val
        conf_matrix[:, orig_shape3d:] = pad_val
    else:
        # BUGFIX: the original sliced conf_matrix columns with the per-match
        # validity mask (wrong axis and wrong length) and then scattered the
        # UNfiltered indices; filter the matches instead, as in the pad branch.
        conf_matrix = torch.zeros(orig_shape2d, orig_shape3d, dtype=torch.int16)
        conf_matrix[assign_matrix[0], assign_matrix[1]] = 1
    return conf_matrix
14,587 | import cv2
import torch
import numpy as np
import os.path as osp
from loguru import logger
from pathlib import Path
def read_gray_scale(img_file):
    """Load img_file as a float32 grayscale image with a leading channel dim."""
    gray = cv2.imread(img_file, cv2.IMREAD_GRAYSCALE).astype(np.float32)
    return gray[None]
14,588 | import torch
import os
from collections import OrderedDict
def save_model(net, optim, scheduler, recorder, epoch, model_dir):
    """Checkpoint net/optim/scheduler/recorder state to model_dir/<epoch>.pth.

    Keeps at most 200 checkpoints in model_dir, deleting the one with the
    smallest epoch number when the limit is exceeded.
    """
    # os.makedirs instead of `os.system('mkdir -p ...')`: portable and not
    # vulnerable to shell interpretation of the path.
    os.makedirs(model_dir, exist_ok=True)
    torch.save({
        'net': net.state_dict(),
        'optim': optim.state_dict(),
        'scheduler': scheduler.state_dict(),
        'recorder': recorder.state_dict(),
        'epoch': epoch
    }, os.path.join(model_dir, '{}.pth'.format(epoch)))
    # Remove the oldest checkpoint once more than 200 are stored.
    # NOTE(review): assumes every file in model_dir is named "<int>.pth".
    pths = [int(pth.split('.')[0]) for pth in os.listdir(model_dir)]
    if len(pths) <= 200:
        return
    os.remove(os.path.join(model_dir, '{}.pth'.format(min(pths))))
14,589 | import torch
import os
from collections import OrderedDict
def remove_net_prefix(net, prefix):
    """Strip `prefix` from every state-dict key that carries it; keep others."""
    stripped = OrderedDict()
    for key, value in net.items():
        new_key = key[len(prefix):] if key.startswith(prefix) else key
        stripped[new_key] = value
    return stripped
def remove_net_layer(net, layers):
    """Delete (in place) every entry whose key starts with a prefix in `layers`;
    returns the same dict."""
    for key in list(net):
        for layer_prefix in layers:
            if key.startswith(layer_prefix):
                del net[key]
    return net
def load_network_ckpt(net, ckpt_path):
    """Load a checkpoint, drop detector weights, strip the 'superglue.' key
    prefix, and load the result into `net`. Always returns None.
    """
    pretrained_model = torch.load(ckpt_path, torch.device('cpu'))
    pretrained_model = pretrained_model['state_dict']
    # BUGFIX: remove_net_layer iterates its `layers` argument; passing the
    # bare string 'detector' iterated its characters and deleted every key
    # starting with 'd', 'e', 't', 'c', 'o' or 'r'. Wrap it in a list.
    pretrained_model = remove_net_layer(pretrained_model, ['detector'])
    pretrained_model = remove_net_prefix(pretrained_model, 'superglue.')
    print('=> load weights: ', ckpt_path)
    net.load_state_dict(pretrained_model)
    return None
14,590 | import torch
import os
from collections import OrderedDict
def add_net_prefix(net, prefix):
    """Return a copy of the state dict with `prefix` prepended to every key."""
    return OrderedDict((prefix + key, value) for key, value in net.items())
14,591 | import torch
import os
from collections import OrderedDict
def replace_net_prefix(net, orig_prefix, prefix):
    """Rewrite keys starting with orig_prefix to start with prefix instead."""
    renamed = OrderedDict()
    for key, value in net.items():
        if key.startswith(orig_prefix):
            renamed[prefix + key[len(orig_prefix):]] = value
        else:
            renamed[key] = value
    return renamed
14,592 | import torch
import os
from collections import OrderedDict
def to_cuda(data):
    """Move a Tensor, a list of Tensors, or a dict of Tensors to the GPU.

    Raises NotImplementedError for any other container type. Note the
    dispatch is on the type NAME, so Tensor subclasses are rejected —
    matching the original behavior.
    """
    kind = type(data).__name__
    if kind == "Tensor":
        return data.cuda()
    if kind == 'list':
        return [item.cuda() for item in data]
    if kind == 'dict':
        return {key: value.cuda() for key, value in data.items()}
    raise NotImplementedError
14,593 | import cv2
import os
from pathlib import Path
from PIL import Image
import os.path as osp
import numpy as np
import matplotlib
import matplotlib.cm as cm
import matplotlib.pyplot as plt
import natsort
from loguru import logger
The provided code snippet includes necessary dependencies for implementing the `draw_2d_box` function. Write a Python function `def draw_2d_box(image, corners_2d, linewidth=3)` to solve the following problem:
Draw 2d box corners @param corners_2d: [x_left, y_top, x_right, y_bottom]
Here is the function:
def draw_2d_box(image, corners_2d, linewidth=3):
    """ Draw 2d box corners
    @param corners_2d: [x_left, y_top, x_right, y_bottom]
    """
    x1, y1, x2, y2 = corners_2d.astype(int)
    corners = [(x1, y1), (x1, y2), (x2, y2), (x2, y1)]
    # Connect consecutive corners (wrapping around) in red.
    for start, end in zip(corners, corners[1:] + corners[:1]):
        cv2.line(image, start, end, (0, 0, 255), linewidth)
14,594 | import cv2
import os
from pathlib import Path
from PIL import Image
import os.path as osp
import numpy as np
import matplotlib
import matplotlib.cm as cm
import matplotlib.pyplot as plt
import natsort
from loguru import logger
jet = cm.get_cmap("jet")
def make_matching_plot(
    image0,
    image1,
    kpts0,
    kpts1,
    mkpts0,
    mkpts1,
    color,
    text,
    path=None,
    show_keypoints=False,
    fast_viz=False,
    opencv_display=False,
    opencv_title='matches',
    small_text=[],  # NOTE(review): mutable default; never mutated here, but fragile
):
    """Render a side-by-side match visualization for an image pair.

    Delegates to make_matching_plot_fast when fast_viz is set (returns None);
    otherwise draws via the module's pyplot helpers and either saves to
    `path` or returns the matplotlib figure.

    kpts0/kpts1 are all detected keypoints, mkpts0/mkpts1 the matched subset,
    `color` one color per match, `text`/`small_text` caption lines.
    NOTE(review): opencv_display/opencv_title are unused on the slow path —
    presumably consumed by make_matching_plot_fast; confirm there.
    """
    if fast_viz:
        make_matching_plot_fast(
            image0,
            image1,
            kpts0,
            kpts1,
            mkpts0,
            mkpts1,
            color,
            text,
            path,
            show_keypoints
        )
        return
    plot_image_pair([image0, image1]) # will create a new figure
    if show_keypoints:
        # Black halo under white dots so keypoints read on any background.
        plot_keypoints(kpts0, kpts1, color='k', ps=4)
        plot_keypoints(kpts0, kpts1, color='w', ps=2)
    plot_matches(mkpts0, mkpts1, color)
    fig = plt.gcf()
    # Pick caption color against the local image brightness.
    txt_color = 'k' if image0[:100, :100].mean() > 200 else 'w'
    fig.text(
        0.01,
        0.99,
        '\n'.join(text),
        transform=fig.axes[0].transAxes,
        fontsize=15,
        va='top',
        ha='left',
        color=txt_color,
    )
    txt_color = 'k' if image0[-100:, :150].mean() > 200 else 'w'
    fig.text(
        0.01,
        0.01,
        '\n'.join(small_text),
        transform=fig.axes[0].transAxes,
        fontsize=5,
        va='bottom',
        ha='left',
        color=txt_color
    )
    if path:
        plt.savefig(str(path), bbox_inches='tight', pad_inches=0)
        plt.close()
    else:
        return fig
def reproj(K, pose, pts_3d):
    """
    Reproj 3d points to 2d points

    K:      (3, 3) or (3, 4) camera intrinsics.
    pose:   (3, 4) or (4, 4) world-to-camera transform.
    pts_3d: any array reshapeable to (n, 3); returns (n, 2) pixel coords.
    """
    assert K.shape in ((3, 3), (3, 4))
    assert pose.shape in ((3, 4), (4, 4))
    # Promote intrinsics and pose to homogeneous form.
    K_homo = K if K.shape == (3, 4) else np.concatenate(
        [K, np.zeros((3, 1))], axis=1)
    pose_homo = pose if pose.shape == (4, 4) else np.concatenate(
        [pose, np.array([[0, 0, 0, 1]])], axis=0)
    pts = pts_3d.reshape(-1, 3)
    pts_homo = np.concatenate([pts, np.ones((pts.shape[0], 1))], axis=1).T
    projected = K_homo @ pose_homo @ pts_homo
    projected = projected / projected[2:]  # perspective division
    return projected[:2].T  # [n, 2]
def draw_reprojection_pair(data, val_results, visual_color_type='conf'):
    """Visualize matched 2D keypoints against GT-reprojected 3D keypoints.

    data: batch dict; reads 'query_image', 'query_intrinsic' and
    'query_pose_gt' (first batch element each).
    val_results: dict with 'mconf' (match confidences), 'mkpts3d' and
    'mkpts2d' (matched 3D/2D keypoints).
    Returns {'evaluation': [figure]} where the figure comes from
    make_matching_plot. Only visual_color_type='conf' is implemented;
    'epi_error' and anything else raise NotImplementedError.
    """
    query_image = data['query_image'][0].cpu().numpy()
    query_K = data['query_intrinsic'][0].cpu().numpy()
    query_pose_gt = data['query_pose_gt'][0].cpu().numpy()
    mconf = val_results['mconf']
    mkpts3d = val_results['mkpts3d']
    mkpts2d = val_results['mkpts2d']
    # Project matched 3D points into the query view using the GT pose.
    mkpts3d_reprojed = reproj(query_K, query_pose_gt, mkpts3d)
    figures = {'evaluation': []}
    text = [
        f'Num of matches: {mkpts3d_reprojed.shape[0]}',
    ]
    if visual_color_type == 'conf':
        if mkpts3d_reprojed.shape[0] != 0:
            # Min-max normalize confidences for the jet colormap;
            # the 1e-4 guards against a zero range.
            mconf_max = np.max(mconf)
            mconf_min = np.min(mconf)
            mconf_normalized = (mconf - mconf_min) / (
                mconf_max - mconf_min + 1e-4
            )
            color = jet(mconf_normalized)
            text += [
                f'Max conf: {mconf_max}',
                f'Min conf: {mconf_min}',
            ]
        else:
            color = np.array([])
    elif visual_color_type == 'epi_error':
        raise NotImplementedError
    else:
        raise NotImplementedError
    # Same image on both sides: 2D detections vs reprojections overlaid.
    figure = make_matching_plot(
        query_image,
        query_image,
        mkpts2d,
        mkpts3d_reprojed,
        mkpts2d,
        mkpts3d_reprojed,
        color=color,
        text=text
    )
    figures['evaluation'].append(figure)
    return figures
14,595 | import functools
import logging
import numpy as np
import pickle
import torch
import torch.distributed as dist
_LOCAL_PROCESS_GROUP = None
def get_rank() -> int:
    """Global rank of this process; 0 when torch.distributed is not in use."""
    if not (dist.is_available() and dist.is_initialized()):
        return 0
    return dist.get_rank()
The provided code snippet includes necessary dependencies for implementing the `get_local_rank` function. Write a Python function `def get_local_rank() -> int` to solve the following problem:
Returns: The rank of the current process within the local (per-machine) process group.
Here is the function:
def get_local_rank() -> int:
    """
    Returns:
        The rank of the current process within the local (per-machine) process group.
        0 when torch.distributed is unavailable or uninitialized.
    """
    if not (dist.is_available() and dist.is_initialized()):
        return 0
    assert _LOCAL_PROCESS_GROUP is not None
    return dist.get_rank(group=_LOCAL_PROCESS_GROUP)
14,596 | import functools
import logging
import numpy as np
import pickle
import torch
import torch.distributed as dist
_LOCAL_PROCESS_GROUP = None
def get_world_size() -> int:
    """Total number of processes; 1 when torch.distributed is not in use."""
    if not (dist.is_available() and dist.is_initialized()):
        return 1
    return dist.get_world_size()
The provided code snippet includes necessary dependencies for implementing the `get_local_size` function. Write a Python function `def get_local_size() -> int` to solve the following problem:
Returns: The size of the per-machine process group, i.e. the number of processes per machine.
Here is the function:
def get_local_size() -> int:
    """
    Returns:
        The size of the per-machine process group,
        i.e. the number of processes per machine.
        1 when torch.distributed is unavailable or uninitialized.
    """
    if not (dist.is_available() and dist.is_initialized()):
        return 1
    return dist.get_world_size(group=_LOCAL_PROCESS_GROUP)
14,597 | import functools
import logging
import numpy as np
import pickle
import torch
import torch.distributed as dist
def get_rank() -> int:
    """Global distributed rank, defaulting to 0 outside distributed runs."""
    if not (dist.is_available() and dist.is_initialized()):
        return 0
    return dist.get_rank()
def is_main_process() -> bool:
    """Return True iff this process has global rank 0."""
    return get_rank() == 0
14,598 | import functools
import logging
import numpy as np
import pickle
import torch
import torch.distributed as dist
def get_world_size() -> int:
    """Number of distributed processes, or 1 outside distributed training."""
    if not (dist.is_available() and dist.is_initialized()):
        return 1
    return dist.get_world_size()
The provided code snippet includes necessary dependencies for implementing the `synchronize` function. Write a Python function `def synchronize()` to solve the following problem:
Helper function to synchronize (barrier) among all processes when using distributed training
Here is the function:
def synchronize():
    """
    Helper function to synchronize (barrier) among all processes when
    using distributed training. A no-op outside multi-process runs.
    """
    if not (dist.is_available() and dist.is_initialized()):
        return
    if dist.get_world_size() == 1:
        return
    dist.barrier()
14,599 | import functools
import logging
import numpy as np
import pickle
import torch
import torch.distributed as dist
def get_world_size() -> int:
    """World size of the default process group; 1 when not distributed."""
    if not (dist.is_available() and dist.is_initialized()):
        return 1
    return dist.get_world_size()
def get_rank() -> int:
    """Rank in the default process group; 0 when not distributed."""
    if not (dist.is_available() and dist.is_initialized()):
        return 0
    return dist.get_rank()
def _get_global_gloo_group():
    """Return a gloo-backend group spanning all ranks (WORLD when the default
    backend is already gloo).

    NOTE(review): the original docstring claimed the result is cached, but no
    caching is visible in this function — presumably an lru_cache decorator
    was attached upstream; confirm before relying on it.
    """
    if dist.get_backend() == "nccl":
        return dist.new_group(backend="gloo")
    return dist.group.WORLD
def _serialize_to_tensor(data, group):
    """Pickle `data` into a ByteTensor on the device matching the group's
    backend (CPU for gloo, CUDA for nccl)."""
    backend = dist.get_backend(group)
    assert backend in ["gloo", "nccl"]
    device = torch.device("cpu" if backend == "gloo" else "cuda")
    buffer = pickle.dumps(data)
    if len(buffer) > 1024 ** 3:
        # Payloads over 1 GiB are legal but expensive to all-gather — warn.
        logging.getLogger(__name__).warning(
            "Rank {} trying to all-gather {:.2f} GB of data on device {}".format(
                get_rank(), len(buffer) / (1024 ** 3), device
            )
        )
    storage = torch.ByteStorage.from_buffer(buffer)
    return torch.ByteTensor(storage).to(device=device)
def _pad_to_largest_tensor(tensor, group):
    """
    Zero-pad a 1-D uint8 tensor so all ranks hold equally-sized tensors.

    Returns:
        list[int]: size of the tensor, on each rank
        Tensor: padded tensor that has the max size
    """
    world_size = dist.get_world_size(group=group)
    assert (
        world_size >= 1
    ), "comm.gather/all_gather must be called from ranks within the given group!"
    local_size = torch.tensor([tensor.numel()], dtype=torch.int64, device=tensor.device)
    size_list = [
        torch.zeros([1], dtype=torch.int64, device=tensor.device) for _ in range(world_size)
    ]
    # Exchange element counts so every rank learns the maximum payload size.
    dist.all_gather(size_list, local_size, group=group)
    size_list = [int(size.item()) for size in size_list]
    max_size = max(size_list)
    # we pad the tensor because torch all_gather does not support
    # gathering tensors of different shapes
    if local_size != max_size:
        padding = torch.zeros((max_size - local_size,), dtype=torch.uint8, device=tensor.device)
        tensor = torch.cat((tensor, padding), dim=0)
    return size_list, tensor
The provided code snippet includes necessary dependencies for implementing the `gather` function. Write a Python function `def gather(data, dst=0, group=None)` to solve the following problem:
Run gather on arbitrary picklable data (not necessarily tensors). Args: data: any picklable object dst (int): destination rank group: a torch process group. By default, will use a group which contains all ranks on gloo backend. Returns: list[data]: on dst, a list of data gathered from each rank. Otherwise, an empty list.
Here is the function:
def gather(data, dst=0, group=None):
    """
    Run gather on arbitrary picklable data (not necessarily tensors).
    Args:
        data: any picklable object
        dst (int): destination rank
        group: a torch process group. By default, will use a group which
            contains all ranks on gloo backend.
    Returns:
        list[data]: on dst, a list of data gathered from each rank. Otherwise,
            an empty list.
    """
    if get_world_size() == 1:
        return [data]
    if group is None:
        group = _get_global_gloo_group()
    if dist.get_world_size(group=group) == 1:
        return [data]
    rank = dist.get_rank(group=group)
    # Pickle the payload and zero-pad so every rank sends an equal-size tensor.
    tensor = _serialize_to_tensor(data, group)
    size_list, tensor = _pad_to_largest_tensor(tensor, group)
    # receiving Tensor from all ranks
    if rank == dst:
        max_size = max(size_list)
        tensor_list = [
            torch.empty((max_size,), dtype=torch.uint8, device=tensor.device) for _ in size_list
        ]
        dist.gather(tensor, tensor_list, dst=dst, group=group)
        data_list = []
        # Strip each rank's padding (true size is in size_list) before unpickling.
        for size, tensor in zip(size_list, tensor_list):
            buffer = tensor.cpu().numpy().tobytes()[:size]
            data_list.append(pickle.loads(buffer))
        return data_list
    else:
        # NOTE(review): recent torch versions expect gather_list=None (not [])
        # on non-destination ranks — verify against the installed torch.
        dist.gather(tensor, [], dst=dst, group=group)
        return []
14,600 | import functools
import logging
import numpy as np
import pickle
import torch
import torch.distributed as dist
def all_gather(data, group=None):
    """
    Run all_gather on arbitrary picklable data (not necessarily tensors).
    Args:
        data: any picklable object
        group: a torch process group. By default, will use a group which
            contains all ranks on gloo backend.
    Returns:
        list[data]: list of data gathered from each rank
    """
    if get_world_size() == 1:
        return [data]
    if group is None:
        group = _get_global_gloo_group()
    if dist.get_world_size(group) == 1:
        return [data]
    # Pickle the payload and zero-pad so every rank sends an equal-size tensor.
    tensor = _serialize_to_tensor(data, group)
    size_list, tensor = _pad_to_largest_tensor(tensor, group)
    max_size = max(size_list)
    # receiving Tensor from all ranks
    tensor_list = [
        torch.empty((max_size,), dtype=torch.uint8, device=tensor.device) for _ in size_list
    ]
    dist.all_gather(tensor_list, tensor, group=group)
    data_list = []
    # Strip each rank's padding (true size is in size_list) before unpickling.
    for size, tensor in zip(size_list, tensor_list):
        buffer = tensor.cpu().numpy().tobytes()[:size]
        data_list.append(pickle.loads(buffer))
    return data_list
The provided code snippet includes necessary dependencies for implementing the `shared_random_seed` function. Write a Python function `def shared_random_seed()` to solve the following problem:
Returns: int: a random number that is the same across all workers. If workers need a shared RNG, they can use this shared seed to create one. All workers must call this function, otherwise it will deadlock.
Here is the function:
def shared_random_seed():
    """
    Returns:
        int: a random number that is the same across all workers.
        If workers need a shared RNG, they can use this shared seed to
        create one.
        All workers must call this function, otherwise it will deadlock.
    """
    local_draw = np.random.randint(2 ** 31)
    # Every rank draws, all draws are gathered, and rank 0's draw wins.
    return all_gather(local_draw)[0]
14,601 | import functools
import logging
import numpy as np
import pickle
import torch
import torch.distributed as dist
def get_world_size() -> int:
if not dist.is_available():
return 1
if not dist.is_initialized():
return 1
return dist.get_world_size()
def get_rank() -> int:
if not dist.is_available():
return 0
if not dist.is_initialized():
return 0
return dist.get_rank()
The provided code snippet includes necessary dependencies for implementing the `reduce_dict` function. Write a Python function `def reduce_dict(input_dict, average=True)` to solve the following problem:
Reduce the values in the dictionary from all processes so that process with rank 0 has the reduced results. Args: input_dict (dict): inputs to be reduced. All the values must be scalar CUDA Tensor. average (bool): whether to do average or sum Returns: a dict with the same keys as input_dict, after reduction.
Here is the function:
def reduce_dict(input_dict, average=True):
    """
    Reduce the values in the dictionary from all processes so that process with rank
    0 has the reduced results.
    Args:
        input_dict (dict): inputs to be reduced. All the values must be scalar CUDA Tensor.
        average (bool): whether to do average or sum
    Returns:
        a dict with the same keys as input_dict, after reduction.
        NOTE(review): only rank 0 receives the reduced values; other ranks'
        returned values are whatever dist.reduce leaves behind.
    """
    world_size = get_world_size()
    if world_size < 2:
        return input_dict
    with torch.no_grad():
        names = []
        values = []
        # sort the keys so that they are consistent across processes
        for k in sorted(input_dict.keys()):
            names.append(k)
            values.append(input_dict[k])
        # Stack into one tensor so a single reduce covers every entry.
        values = torch.stack(values, dim=0)
        dist.reduce(values, dst=0)
        if dist.get_rank() == 0 and average:
            # only main process gets accumulated, so only divide by
            # world_size in this case
            values /= world_size
        reduced_dict = {k: v for k, v in zip(names, values)}
    return reduced_dict
14,602 | import sys
import sqlite3
import numpy as np
IS_PYTHON3 = sys.version_info[0] >= 3
def array_to_blob(array):
    """Serialize a numpy array to raw bytes for storage in a SQLite BLOB.

    Uses tobytes(): ndarray.tostring() was deprecated in NumPy 1.19 and
    removed in NumPy 2.0 (it returned the identical bytes), and the old
    Python-2 np.getbuffer branch is dead code under Python 3.
    """
    return array.tobytes()
14,603 | import sys
import sqlite3
import numpy as np
def image_ids_to_pair_id(image_id1, image_id2):
    """Map an unordered pair of image ids to COLMAP's canonical scalar pair id."""
    # Canonical order: the smaller id always comes first.
    lo, hi = (image_id2, image_id1) if image_id1 > image_id2 else (image_id1, image_id2)
    return lo * MAX_IMAGE_ID + hi
def pair_id_to_image_ids(pair_id):
    """Invert image_ids_to_pair_id, recovering the two image ids.

    Returns:
        (image_id1, image_id2) with image_id1 <= image_id2.
    """
    image_id2 = pair_id % MAX_IMAGE_ID
    # BUGFIX: use floor division -- plain `/` returns a float under Python 3,
    # so the recovered id would no longer be an integer.
    image_id1 = (pair_id - image_id2) // MAX_IMAGE_ID
    return image_id1, image_id2
def blob_to_array(blob, dtype, shape=(-1,)):
    """Deserialize a SQLite BLOB back into a numpy array.

    Args:
        blob: raw bytes as written by array_to_blob.
        dtype: numpy dtype the bytes encode.
        shape: target shape; defaults to a flat array.
    Returns:
        numpy array over the blob's data. Note: the array is a read-only view
        of the buffer.
    """
    # np.fromstring is deprecated (and removed for binary data in modern
    # NumPy); np.frombuffer behaves identically on both Python 2 and 3, so
    # the Python-version branch is unnecessary.
    return np.frombuffer(blob, dtype=dtype).reshape(*shape)
class COLMAPDatabase(sqlite3.Connection):
    """sqlite3 connection specialized for COLMAP database files.

    Adds helpers to create COLMAP's schema and to insert cameras, images,
    keypoints, descriptors, matches and two-view geometries. Numpy arrays are
    stored as raw BLOBs via array_to_blob; the CREATE_* SQL strings are
    module-level constants defined elsewhere in this file.
    """
    def connect(database_path):
        # Factory entry point: use as `COLMAPDatabase.connect(path)`.
        # NOTE(review): acts like a static factory but lacks @staticmethod;
        # calling it on an instance would mis-bind the argument.
        return sqlite3.connect(str(database_path), factory=COLMAPDatabase)
    def __init__(self, *args, **kwargs):
        super(COLMAPDatabase, self).__init__(*args, **kwargs)
        # Bind per-table creation helpers over the module-level SQL constants.
        self.create_tables = lambda: self.executescript(CREATE_ALL)
        self.create_cameras_table = \
            lambda: self.executescript(CREATE_CAMERAS_TABLE)
        self.create_descriptors_table = \
            lambda: self.executescript(CREATE_DESCRIPTORS_TABLE)
        self.create_images_table = \
            lambda: self.executescript(CREATE_IMAGES_TABLE)
        self.create_two_view_geometries_table = \
            lambda: self.executescript(CREATE_TWO_VIEW_GEOMETRIES_TABLE)
        self.create_keypoints_table = \
            lambda: self.executescript(CREATE_KEYPOINTS_TABLE)
        self.create_matches_table = \
            lambda: self.executescript(CREATE_MATCHES_TABLE)
        self.create_name_index = lambda: self.executescript(CREATE_NAME_INDEX)
    def add_camera(self, model, width, height, params,
                   prior_focal_length=False, camera_id=None):
        """Insert a camera row; returns the assigned camera id.

        `model` is a COLMAP camera-model id; `params` are the intrinsics for
        that model, stored as a float64 blob.
        """
        params = np.asarray(params, np.float64)
        cursor = self.execute(
            "INSERT INTO cameras VALUES (?, ?, ?, ?, ?, ?)",
            (camera_id, model, width, height, array_to_blob(params),
             prior_focal_length))
        return cursor.lastrowid
    def add_image(self, name, camera_id,
                  prior_q=np.zeros(4), prior_t=np.zeros(3), image_id=None):
        """Insert an image row with optional prior pose (quaternion q, translation t).

        Returns the assigned image id. If image_id is None, SQLite assigns one.
        """
        cursor = self.execute(
            "INSERT INTO images VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
            (image_id, name, camera_id, prior_q[0], prior_q[1], prior_q[2],
             prior_q[3], prior_t[0], prior_t[1], prior_t[2]))
        return cursor.lastrowid
    def add_keypoints(self, image_id, keypoints):
        """Store an image's keypoints as a float32 blob.

        COLMAP accepts (N, 2) = (x, y), (N, 4) = (x, y, theta, scale), or
        (N, 6) affine keypoints.
        """
        assert(len(keypoints.shape) == 2)
        assert(keypoints.shape[1] in [2, 4, 6])
        keypoints = np.asarray(keypoints, np.float32)
        self.execute(
            "INSERT INTO keypoints VALUES (?, ?, ?, ?)",
            (image_id,) + keypoints.shape + (array_to_blob(keypoints),))
    def add_descriptors(self, image_id, descriptors):
        """Store an image's descriptors as a uint8 blob (rows x dims recorded)."""
        descriptors = np.ascontiguousarray(descriptors, np.uint8)
        self.execute(
            "INSERT INTO descriptors VALUES (?, ?, ?, ?)",
            (image_id,) + descriptors.shape + (array_to_blob(descriptors),))
    def add_matches(self, image_id1, image_id2, matches):
        """Store raw (N, 2) keypoint-index matches under the canonical pair id.

        Columns are swapped when the ids arrive out of order so that column 0
        always refers to the smaller image id.
        """
        assert(len(matches.shape) == 2)
        assert(matches.shape[1] == 2)
        if image_id1 > image_id2:
            matches = matches[:,::-1]
        pair_id = image_ids_to_pair_id(image_id1, image_id2)
        matches = np.asarray(matches, np.uint32)
        self.execute(
            "INSERT INTO matches VALUES (?, ?, ?, ?)",
            (pair_id,) + matches.shape + (array_to_blob(matches),))
    def add_two_view_geometry(self, image_id1, image_id2, matches,
                              F=np.eye(3), E=np.eye(3), H=np.eye(3), config=2):
        """Store verified matches plus the F/E/H geometry for an image pair.

        `config` is COLMAP's two-view configuration code (2 = calibrated).
        As in add_matches, match columns are swapped into canonical order.
        """
        assert(len(matches.shape) == 2)
        assert(matches.shape[1] == 2)
        if image_id1 > image_id2:
            matches = matches[:,::-1]
        pair_id = image_ids_to_pair_id(image_id1, image_id2)
        matches = np.asarray(matches, np.uint32)
        F = np.asarray(F, dtype=np.float64)
        E = np.asarray(E, dtype=np.float64)
        H = np.asarray(H, dtype=np.float64)
        self.execute(
            "INSERT INTO two_view_geometries VALUES (?, ?, ?, ?, ?, ?, ?, ?)",
            (pair_id,) + matches.shape + (array_to_blob(matches), config,
             array_to_blob(F), array_to_blob(E), array_to_blob(H)))
def example_usage():
    """End-to-end smoke test of the COLMAPDatabase helpers.

    Creates a fresh database at --database_path, inserts dummy cameras,
    images, keypoints and matches, reads everything back to verify the
    round-trip, then deletes the database file.
    """
    import os
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument("--database_path", default="database.db")
    args = parser.parse_args()
    # Refuse to touch an existing database -- this demo writes and deletes it.
    if os.path.exists(args.database_path):
        print("ERROR: database path already exists -- will not modify it.")
        return
    # Open the database.
    db = COLMAPDatabase.connect(args.database_path)
    # For convenience, try creating all the tables upfront.
    db.create_tables()
    # Create dummy cameras (model 0 = SIMPLE_PINHOLE, model 2 = SIMPLE_RADIAL).
    model1, width1, height1, params1 = \
        0, 1024, 768, np.array((1024., 512., 384.))
    model2, width2, height2, params2 = \
        2, 1024, 768, np.array((1024., 512., 384., 0.1))
    camera_id1 = db.add_camera(model1, width1, height1, params1)
    camera_id2 = db.add_camera(model2, width2, height2, params2)
    # Create dummy images.
    image_id1 = db.add_image("image1.png", camera_id1)
    image_id2 = db.add_image("image2.png", camera_id1)
    image_id3 = db.add_image("image3.png", camera_id2)
    image_id4 = db.add_image("image4.png", camera_id2)
    # Create dummy keypoints.
    #
    # Note that COLMAP supports:
    #      - 2D keypoints: (x, y)
    #      - 4D keypoints: (x, y, theta, scale)
    #      - 6D affine keypoints: (x, y, a_11, a_12, a_21, a_22)
    num_keypoints = 1000
    keypoints1 = np.random.rand(num_keypoints, 2) * (width1, height1)
    keypoints2 = np.random.rand(num_keypoints, 2) * (width1, height1)
    keypoints3 = np.random.rand(num_keypoints, 2) * (width2, height2)
    keypoints4 = np.random.rand(num_keypoints, 2) * (width2, height2)
    db.add_keypoints(image_id1, keypoints1)
    db.add_keypoints(image_id2, keypoints2)
    db.add_keypoints(image_id3, keypoints3)
    db.add_keypoints(image_id4, keypoints4)
    # Create dummy matches (random keypoint index pairs).
    M = 50
    matches12 = np.random.randint(num_keypoints, size=(M, 2))
    matches23 = np.random.randint(num_keypoints, size=(M, 2))
    matches34 = np.random.randint(num_keypoints, size=(M, 2))
    db.add_matches(image_id1, image_id2, matches12)
    db.add_matches(image_id2, image_id3, matches23)
    db.add_matches(image_id3, image_id4, matches34)
    # Commit the data to the file.
    db.commit()
    # Read and check cameras.
    rows = db.execute("SELECT * FROM cameras")
    camera_id, model, width, height, params, prior = next(rows)
    params = blob_to_array(params, np.float64)
    assert camera_id == camera_id1
    assert model == model1 and width == width1 and height == height1
    assert np.allclose(params, params1)
    camera_id, model, width, height, params, prior = next(rows)
    params = blob_to_array(params, np.float64)
    assert camera_id == camera_id2
    assert model == model2 and width == width2 and height == height2
    assert np.allclose(params, params2)
    # Read and check keypoints.
    keypoints = dict(
        (image_id, blob_to_array(data, np.float32, (-1, 2)))
        for image_id, data in db.execute(
            "SELECT image_id, data FROM keypoints"))
    assert np.allclose(keypoints[image_id1], keypoints1)
    assert np.allclose(keypoints[image_id2], keypoints2)
    assert np.allclose(keypoints[image_id3], keypoints3)
    assert np.allclose(keypoints[image_id4], keypoints4)
    # Read and check matches, keyed by the recovered image-id pair.
    pair_ids = [image_ids_to_pair_id(*pair) for pair in
                ((image_id1, image_id2),
                 (image_id2, image_id3),
                 (image_id3, image_id4))]
    matches = dict(
        (pair_id_to_image_ids(pair_id),
         blob_to_array(data, np.uint32, (-1, 2)))
        for pair_id, data in db.execute("SELECT pair_id, data FROM matches")
    )
    assert np.all(matches[(image_id1, image_id2)] == matches12)
    assert np.all(matches[(image_id2, image_id3)] == matches23)
    assert np.all(matches[(image_id3, image_id4)] == matches34)
    # Clean up: the demo database is throwaway.
    db.close()
    if os.path.exists(args.database_path):
        os.remove(args.database_path)
14,604 | import os
import sys
import collections
import numpy as np
import struct
import argparse
def qvec2rotmat(qvec):
    """Convert a quaternion (w, x, y, z) into a 3x3 rotation matrix."""
    w, x, y, z = qvec
    # Standard quaternion-to-rotation expansion.
    return np.array([
        [1 - 2 * y * y - 2 * z * z,
         2 * x * y - 2 * w * z,
         2 * z * x + 2 * w * y],
        [2 * x * y + 2 * w * z,
         1 - 2 * x * x - 2 * z * z,
         2 * y * z - 2 * w * x],
        [2 * z * x - 2 * w * y,
         2 * y * z + 2 * w * x,
         1 - 2 * x * x - 2 * y * y]])
14,605 | import cv2
import numpy as np
import os.path as osp
from pathlib import Path
def ransac_PnP(K, pts_2d, pts_3d, scale=1):
    """Estimate an object pose from 2D-3D correspondences with EPnP + RANSAC.

    Returns (pose_3x4, pose_4x4_homogeneous, inliers); on an OpenCV failure
    the identity pose and an empty inlier list are returned.
    """
    # Assume no lens distortion (8 zero coefficients).
    distortion = np.zeros(shape=[8, 1], dtype='float64')
    points_2d = np.ascontiguousarray(pts_2d.astype(np.float64))
    points_3d = np.ascontiguousarray(pts_3d.astype(np.float64))
    camera_K = K.astype(np.float64)

    # Solve in scaled 3D coordinates; the translation is unscaled below.
    points_3d *= scale
    try:
        _, rvec, tvec, inliers = cv2.solvePnPRansac(
            points_3d, points_2d, camera_K, distortion,
            reprojectionError=5, iterationsCount=10000,
            flags=cv2.SOLVEPNP_EPNP)
        rot = cv2.Rodrigues(rvec)[0]
        tvec /= scale
        pose = np.concatenate([rot, tvec], axis=-1)
        pose_homo = np.concatenate([pose, np.array([[0, 0, 0, 1]])], axis=0)
        return pose, pose_homo, inliers if inliers is not None else []
    except cv2.error:
        print("CV ERROR")
        return np.eye(4)[:3], np.eye(4), []
def query_pose_error(pose_pred, pose_gt):
    """Compute angular (degrees) and translation (cm) error between two poses.

    Input:
    ---------
    pose_pred: np.array 3*4 or 4*4
    pose_gt: np.array 3*4 or 4*4

    Returns:
        (angular_distance_deg, translation_distance_cm)
    """
    # Accept homogeneous 4x4 poses by dropping the last row.
    if pose_pred.shape[0] == 4:
        pose_pred = pose_pred[:3]
    if pose_gt.shape[0] == 4:
        pose_gt = pose_gt[:3]

    # Translation error scaled by 100 (meters -> centimeters, presumably --
    # confirm the dataset units).
    translation_distance = np.linalg.norm(pose_pred[:, 3] - pose_gt[:, 3]) * 100

    # Angular error from the relative rotation: trace(R_rel) = 1 + 2*cos(theta).
    # BUGFIX: clamp the cosine to [-1, 1] -- the original only clamped the
    # upper bound (trace <= 3), so numerical noise pushing the trace slightly
    # below -1 made arccos return NaN.
    rotation_diff = np.dot(pose_pred[:, :3], pose_gt[:, :3].T)
    cos_angle = np.clip((np.trace(rotation_diff) - 1.0) / 2.0, -1.0, 1.0)
    angular_distance = np.rad2deg(np.arccos(cos_angle))
    return angular_distance, translation_distance
def compute_query_pose_errors(data, preds):
    """Estimate the query pose by PnP on predicted 2D-3D matches and score it.

    Args:
        data (dict): batch (size 1) with 'query_pose_gt', 'query_intrinsic',
            'keypoints2d' and 'keypoints3d' tensors.
        preds (dict): network output with 'matches0' (index into keypoints3d,
            -1 for unmatched) and 'matching_scores0'.
    Returns:
        (pose_pred, val_results): stacked predicted homogeneous pose(s) and a
        dict of R/t errors, inliers and the matched keypoints/confidences.
    """
    query_pose_gt = data['query_pose_gt'][0].cpu().numpy()
    query_K = data['query_intrinsic'][0].cpu().numpy()
    query_kpts2d = data['keypoints2d'][0].cpu().numpy()
    query_kpts3d = data['keypoints3d'][0].cpu().numpy()
    matches0 = preds['matches0'].cpu().numpy()
    confidence = preds['matching_scores0'].cpu().numpy()

    # Keep only matched keypoints (-1 marks "no match").
    valid = matches0 > -1
    mkpts2d = query_kpts2d[valid]
    mkpts3d = query_kpts3d[matches0[valid]]
    mconf = confidence[valid]

    pose_pred = []
    val_results = {'R_errs': [], 't_errs': [], 'inliers': []}

    query_pose_pred, query_pose_pred_homo, inliers = ransac_PnP(
        query_K,
        mkpts2d,
        mkpts3d
    )
    pose_pred.append(query_pose_pred_homo)

    if query_pose_pred is None:
        val_results['R_errs'].append(np.inf)
        val_results['t_errs'].append(np.inf)
        # BUGFIX: the original chained `.astype(np.bool)` onto list.append()
        # (which returns None -> AttributeError) and used the np.bool alias
        # removed in NumPy 1.24. Build the empty boolean array, then append.
        # (ransac_PnP as written never returns None, so this branch is a
        # defensive guard.)
        val_results['inliers'].append(np.array([], dtype=bool))
    else:
        R_err, t_err = query_pose_error(query_pose_pred, query_pose_gt)
        val_results['R_errs'].append(R_err)
        val_results['t_errs'].append(t_err)
        val_results['inliers'].append(inliers)

    pose_pred = np.stack(pose_pred)
    val_results.update({'mkpts2d': mkpts2d, 'mkpts3d': mkpts3d, 'mconf': mconf})
    return pose_pred, val_results
14,606 | import cv2
import numpy as np
import os.path as osp
from pathlib import Path
The provided code snippet includes the necessary dependencies for implementing the `aggregate_metrics` function. Write a Python function `def aggregate_metrics(metrics, thres=[1, 3, 5])` to solve the following problem:
Aggregate metrics for the whole dataset (this method should be called once per dataset): for each threshold T in `thres`, report the fraction of samples whose rotation error is below T degrees and whose translation error is below T centimeters (the "T cm @ T degree" metric).
Here is the function:
def aggregate_metrics(metrics, thres=[1, 3, 5]):
    """Aggregate per-sample pose errors over the whole dataset.

    (This method should be called once per dataset.)

    For each threshold T in `thres`, reports the fraction of samples whose
    rotation error is below T degrees AND whose translation error is below
    T centimeters (the "T cm @ T degree" success metric).

    Args:
        metrics (dict): must contain parallel sequences 'R_errs' (degrees)
            and 't_errs' (centimeters) of per-sample errors.
        thres (list): thresholds to evaluate.
    Returns:
        dict mapping '{T}cm@{T}degree' to the success ratio in [0, 1].
    """
    # Convert once instead of rebuilding the arrays on every threshold.
    R_errs = np.asarray(metrics['R_errs'])
    t_errs = np.asarray(metrics['t_errs'])
    degree_distance_metric = {}
    for threshold in thres:
        degree_distance_metric[f'{threshold}cm@{threshold}degree'] = np.mean(
            (R_errs < threshold) & (t_errs < threshold)
        )
    return degree_distance_metric
14,607 | import json
import os
import glob
import hydra
import os.path as osp
from loguru import logger
from pathlib import Path
from omegaconf import DictConfig
def merge_(anno_2d_file, avg_anno_3d_file, collect_anno_3d_file,
           idxs_file, img_id, ann_id, images, annotations):
    """To prepare training and test objects, we merge annotations of different objects.

    Appends one image record and one annotation record per entry of
    `anno_2d_file` to the `images` / `annotations` lists (mutated in place)
    and returns the updated (img_id, ann_id) counters.
    """
    with open(anno_2d_file, 'r') as f:
        annos_2d = json.load(f)

    for anno_2d in annos_2d:
        img_id += 1
        images.append({
            'id': img_id,
            'img_file': anno_2d['img_file'],
        })
        ann_id += 1
        annotations.append({
            'image_id': img_id,
            'id': ann_id,
            'pose_file': anno_2d['pose_file'],
            'anno2d_file': anno_2d['anno_file'],
            'avg_anno3d_file': avg_anno_3d_file,
            'collect_anno3d_file': collect_anno_3d_file,
            'idxs_file': idxs_file
        })
    return img_id, ann_id
The provided code snippet includes the necessary dependencies for implementing the `merge_anno` function. Write a Python function `def merge_anno(cfg)` to solve the following problem:
Merge different objects' annotation files into one annotation file.
Here is the function:
def merge_anno(cfg):
    """Merge different objects' anno file into one anno file.

    Collects the per-object 2D/3D annotation files for the objects named by
    the configured split, merges them into one COCO-style structure via
    merge_, and writes the result to cfg.datamodule.out_path.
    """
    anno_dirs = []

    if cfg.split == 'train':
        names = cfg.train.names
    elif cfg.split == 'val':
        names = cfg.val.names
    else:
        # BUGFIX: an unknown split previously crashed later with a confusing
        # NameError on `names`; fail fast with a clear message instead.
        raise ValueError(f"Unknown split: {cfg.split!r} (expected 'train' or 'val')")

    for name in names:
        anno_dir = osp.join(cfg.datamodule.data_dir, name, f'outputs_{cfg.network.detection}_{cfg.network.matching}', 'anno')
        anno_dirs.append(anno_dir)

    img_id = 0
    ann_id = 0
    images = []
    annotations = []
    for anno_dir in anno_dirs:
        logger.info(f'Merging anno dir: {anno_dir}')
        anno_2d_file = osp.join(anno_dir, 'anno_2d.json')
        avg_anno_3d_file = osp.join(anno_dir, 'anno_3d_average.npz')
        collect_anno_3d_file = osp.join(anno_dir, 'anno_3d_collect.npz')
        idxs_file = osp.join(anno_dir, 'idxs.npy')

        # Skip objects whose SfM/annotation outputs are missing.
        if not osp.isfile(anno_2d_file) or not osp.isfile(avg_anno_3d_file) or not osp.isfile(collect_anno_3d_file):
            logger.info(f'No annotation in: {anno_dir}')
            continue

        img_id, ann_id = merge_(anno_2d_file, avg_anno_3d_file, collect_anno_3d_file,
                                idxs_file, img_id, ann_id, images, annotations)

    logger.info(f'Total num: {len(images)}')
    instance = {'images': images, 'annotations': annotations}

    out_dir = osp.dirname(cfg.datamodule.out_path)
    Path(out_dir).mkdir(exist_ok=True, parents=True)
    with open(cfg.datamodule.out_path, 'w') as f:
        json.dump(instance, f)
14,608 | import glob
import torch
import hydra
from tqdm import tqdm
import os.path as osp
import numpy as np
from PIL import Image
from loguru import logger
from torch.utils.data import DataLoader
from src.utils import data_utils, path_utils, eval_utils, vis_utils
from pytorch_lightning import seed_everything
def inference_core(cfg, data_root, seq_dir, sfm_model_dir):
    """Run OnePose inference on one sequence, evaluate, and optionally visualize.

    For every query frame: extract 2D keypoints, match them against the SfM
    model's 3D features with GATsSPG, solve the pose with RANSAC PnP, and
    compare against the ground-truth pose. Results are written under
    cfg.output.eval_dir.
    """
    from src.datasets.normalized_dataset import NormalizedDataset
    from src.sfm.extract_features import confs
    from src.evaluators.cmd_evaluator import Evaluator
    matching_model, extractor_model = load_model(cfg)
    img_lists, paths = get_default_paths(cfg, data_root, seq_dir, sfm_model_dir)
    dataset = NormalizedDataset(img_lists, confs[cfg.network.detection]['preprocessing'])
    loader = DataLoader(dataset, num_workers=1)
    evaluator = Evaluator()
    idx = 0
    # num_leaf: how many 2D observations are kept per 3D point.
    num_leaf = cfg.num_leaf
    avg_data = np.load(paths['avg_anno_3d_path'])
    clt_data = np.load(paths['clt_anno_3d_path'])
    idxs = np.load(paths['idxs_path'])
    keypoints3d = torch.Tensor(clt_data['keypoints3d']).cuda()
    num_3d = keypoints3d.shape[0]
    # Load average 3D features:
    avg_descriptors3d, _ = data_utils.pad_features3d_random(
        avg_data['descriptors3d'],
        avg_data['scores3d'],
        num_3d
    )
    # Load corresponding 2D features of each 3D point:
    clt_descriptors, _ = data_utils.build_features3d_leaves(
        clt_data['descriptors3d'],
        clt_data['scores3d'],
        idxs, num_3d, num_leaf
    )
    for data in tqdm(loader):
        img_path = data['path'][0]
        inp = data['image'].cuda()
        # Intrinsics of the (cropped) query image produced by the detector.
        intrin_path = path_utils.get_intrin_path_by_color(img_path, det_type=cfg.object_detect_mode)
        K_crop = np.loadtxt(intrin_path)
        # Detect query image keypoints and extract descriptors:
        pred_detection = extractor_model(inp)
        pred_detection = {k: v[0].cpu().numpy() for k, v in pred_detection.items()}
        # 2D-3D matching by GATsSPG:
        inp_data = pack_data(avg_descriptors3d, clt_descriptors,
                             keypoints3d, pred_detection, data['size'])
        pred, _ = matching_model(inp_data)
        matches = pred['matches0'].detach().cpu().numpy()
        valid = matches > -1
        kpts2d = pred_detection['keypoints']
        kpts3d = inp_data['keypoints3d'][0].detach().cpu().numpy()
        confidence = pred['matching_scores0'].detach().cpu().numpy()
        mkpts2d, mkpts3d, mconf = kpts2d[valid], kpts3d[matches[valid]], confidence[valid]
        # Estimate object pose by 2D-3D correspondences:
        pose_pred, pose_pred_homo, inliers = eval_utils.ransac_PnP(K_crop, mkpts2d, mkpts3d, scale=1000)
        # Evaluate:
        gt_pose_path = path_utils.get_gt_pose_path_by_color(img_path, det_type=cfg.object_detect_mode)
        pose_gt = np.loadtxt(gt_pose_path)
        evaluator.evaluate(pose_pred, pose_gt)
        # Visualize:
        if cfg.save_wis3d:
            # Reproject the GT/predicted poses onto the full-resolution image
            # and dump a wis3d visualization of the matches.
            poses = [pose_gt, pose_pred_homo]
            box3d_path = path_utils.get_3d_box_path(data_root)
            intrin_full_path = path_utils.get_intrin_full_path(seq_dir)
            image_full_path = path_utils.get_img_full_path_by_color(img_path, det_type=cfg.object_detect_mode)
            image_full = vis_utils.vis_reproj(image_full_path, poses, box3d_path, intrin_full_path,
                                              save_demo=cfg.save_demo, demo_root=cfg.demo_root)
            mkpts3d_2d = vis_utils.reproj(K_crop, pose_gt, mkpts3d)
            image0 = Image.open(img_path).convert('LA')
            image1 = image0.copy()
            vis_utils.dump_wis3d(idx, cfg, seq_dir, image0, image1, image_full,
                                 mkpts2d, mkpts3d_2d, mconf, inliers)
        idx += 1
    eval_result = evaluator.summarize()
    obj_name = sfm_model_dir.split('/')[-1]
    seq_name = seq_dir.split('/')[-1]
    eval_utils.record_eval_result(cfg.output.eval_dir, obj_name, seq_name, eval_result)
def inference(cfg):
    """Run OnePose inference over every configured (data_dir, sfm_model_dir) pair.

    Each data_dir entry is a space-separated string: the first token is the
    dataset root, the remaining tokens are sequence names under that root.
    """
    data_dirs = cfg.input.data_dirs
    sfm_model_dirs = cfg.input.sfm_model_dirs
    # A plain string means a single dataset; normalize both to lists.
    if isinstance(data_dirs, str) and isinstance(sfm_model_dirs, str):
        data_dirs, sfm_model_dirs = [data_dirs], [sfm_model_dirs]

    for data_dir, sfm_model_dir in tqdm(zip(data_dirs, sfm_model_dirs), total=len(data_dirs)):
        data_root, *seq_names = data_dir.split(" ")
        for seq_name in seq_names:
            seq_dir = osp.join(data_root, seq_name)
            logger.info(f'Eval {seq_dir}')
            inference_core(cfg, data_root, seq_dir, sfm_model_dir)
14,609 | import glob
import torch
import hydra
from tqdm import tqdm
import os
import os.path as osp
import natsort
from loguru import logger
from torch.utils.data import DataLoader
from src.utils import data_utils
from src.utils.model_io import load_network
from src.local_feature_2D_detector import LocalFeatureObjectDetector
from pytorch_lightning import seed_everything
The provided code snippet includes necessary dependencies for implementing the `pack_data` function. Write a Python function `def pack_data(avg_descriptors3d, clt_descriptors, keypoints3d, detection, image_size)` to solve the following problem:
Prepare data for OnePose inference
Here is the function:
def pack_data(avg_descriptors3d, clt_descriptors, keypoints3d, detection, image_size):
    """Prepare data for OnePose inference.

    Packs the query-frame detections and the 3D database features into the
    batched (batch size 1) CUDA-tensor dict the matching network expects.
    """
    kpts2d = torch.Tensor(detection["keypoints"])
    desc2d = torch.Tensor(detection["descriptors"])
    return {
        "keypoints2d": kpts2d[None].cuda(),                  # [1, n1, 2]
        "keypoints3d": keypoints3d[None].cuda(),             # [1, n2, 3]
        "descriptors2d_query": desc2d[None].cuda(),          # [1, dim, n1]
        "descriptors3d_db": avg_descriptors3d[None].cuda(),  # [1, dim, n2]
        "descriptors2d_db": clt_descriptors[None].cuda(),    # [1, dim, n2*num_leaf]
        "image_size": image_size,
    }
14,610 | import glob
import torch
import hydra
from tqdm import tqdm
import os
import os.path as osp
import natsort
from loguru import logger
from torch.utils.data import DataLoader
from src.utils import data_utils
from src.utils.model_io import load_network
from src.local_feature_2D_detector import LocalFeatureObjectDetector
from pytorch_lightning import seed_everything
def inference_core(cfg, data_root, seq_dir, sfm_model_dir):
    """Run the 2D-feature-matching object detector on every frame of one sequence.

    Loads the extractor/matcher pair, builds a LocalFeatureObjectDetector from
    the sequence's SfM workspace, and calls detect() per frame; detection
    crops and cropped intrinsics are written to the configured output dirs.
    """
    from src.datasets.normalized_dataset import NormalizedDataset
    from src.sfm.extract_features import confs
    # Load models and prepare data:
    extractor_model, matching_2D_model = load_2D_matching_model(cfg)
    img_lists, paths = get_default_paths(cfg, data_root, seq_dir, sfm_model_dir)
    K, _ = data_utils.get_K(paths["intrin_full_path"])
    local_feature_obj_detector = LocalFeatureObjectDetector(
        extractor_model,
        matching_2D_model,
        sfm_ws_dir=paths["sfm_ws_dir"],
        n_ref_view=cfg.n_ref_view,
        output_results=True,
        detect_save_dir=paths['output_detect_img_dir'],
        K_crop_save_dir=paths['output_K_crop_dir']
    )
    dataset = NormalizedDataset(
        img_lists, confs[cfg.network.detection]["preprocessing"]
    )
    loader = DataLoader(dataset, num_workers=1)
    # Begin Object detection:
    for id, data in enumerate(tqdm(loader)):
        img_path = data["path"][0]
        inp = data["image"].cuda()
        # Detect object by 2D local feature matching for the first frame:
        local_feature_obj_detector.detect(inp, img_path, K)
def inference(cfg):
    """Run the feature-matching object detector over all configured sequences.

    Each data_dir entry is a space-separated string: the first token is the
    dataset root, the remaining tokens are sequence names under that root.
    """
    data_dirs = cfg.input.data_dirs
    sfm_model_dirs = cfg.input.sfm_model_dirs
    # A plain string means a single dataset; normalize both to lists.
    if isinstance(data_dirs, str) and isinstance(sfm_model_dirs, str):
        data_dirs, sfm_model_dirs = [data_dirs], [sfm_model_dirs]

    for data_dir, sfm_model_dir in tqdm(zip(data_dirs, sfm_model_dirs), total=len(data_dirs)):
        data_root, *seq_names = data_dir.split(" ")
        for seq_name in seq_names:
            seq_dir = osp.join(data_root, seq_name)
            logger.info(f"Run feature matching object detector for: {seq_dir}")
            inference_core(cfg, data_root, seq_dir, sfm_model_dir)
14,611 | import os
import cv2
import tqdm
import numpy as np
import os.path as osp
import argparse
from pathlib import Path
from transforms3d import affines, quaternions
from src.utils import data_utils
def get_arkit_default_path(data_dir):
    """Build (and create where needed) the standard ARKit capture layout.

    Creates the output directories if absent, asserts that the raw capture
    files (Box.txt, ARposes.txt, Frames.txt) exist, and returns a dict of all
    relevant paths.
    """
    def ensure_dir(path):
        # Create the directory (and parents) if it does not exist yet.
        Path(path).mkdir(parents=True, exist_ok=True)
        return path

    def require_file(path):
        # The raw capture must already contain this file.
        assert Path(path).exists()
        return path

    video_file = osp.join(data_dir, 'Frames.m4v')
    color_dir = ensure_dir(osp.join(data_dir, 'color'))
    box_file = require_file(osp.join(data_dir, 'Box.txt'))
    out_3D_box_dir = osp.join(osp.dirname(data_dir), 'box3d_corners.txt')
    out_pose_dir = ensure_dir(osp.join(data_dir, 'poses'))
    pose_file = require_file(osp.join(data_dir, 'ARposes.txt'))
    reproj_box_dir = ensure_dir(osp.join(data_dir, 'reproj_box'))
    out_box_dir = ensure_dir(osp.join(data_dir, 'bbox'))
    orig_intrin_file = require_file(osp.join(data_dir, 'Frames.txt'))
    final_intrin_file = osp.join(data_dir, 'intrinsics.txt')
    intrin_dir = ensure_dir(osp.join(data_dir, 'intrin'))
    M_dir = ensure_dir(osp.join(data_dir, 'M'))

    return {
        'video_file': video_file,
        'color_dir': color_dir,
        'box_path': box_file,
        'pose_file': pose_file,
        'out_box_dir': out_box_dir,
        'out_3D_box_dir': out_3D_box_dir,
        'reproj_box_dir': reproj_box_dir,
        'out_pose_dir': out_pose_dir,
        'orig_intrin_file': orig_intrin_file,
        'final_intrin_file': final_intrin_file,
        'intrin_dir': intrin_dir,
        'M_dir': M_dir
    }
def get_bbox3d(box_path):
    """Read the annotated object box and return its 8 corner points.

    Returns:
        (bbox_3d, bbox_3d_homo): corners as an (8, 3) array and in
        homogeneous form (8, 4), centered at the origin in the object frame.
    """
    assert Path(box_path).exists()
    with open(box_path, 'r') as f:
        lines = f.readlines()
    # Line 0 is a header; line 1 carries the box parameters, extents at [3:6].
    box_data = [float(e) for e in lines[1].strip().split(',')]
    ex, ey, ez = box_data[3: 6]
    # The 8 corners are every sign combination of the half-extents.
    corner_signs = [
        (-1, -1, -1), (1, -1, -1), (1, -1, 1), (-1, -1, 1),
        (-1, 1, -1), (1, 1, -1), (1, 1, 1), (-1, 1, 1),
    ]
    bbox_3d = 0.5 * np.array(
        [[sx * ex, sy * ey, sz * ez] for sx, sy, sz in corner_signs]
    )
    bbox_3d_homo = np.concatenate([bbox_3d, np.ones((8, 1))], axis=1)
    return bbox_3d, bbox_3d_homo
def parse_box(box_path):
    """Parse Box.txt into the object-to-world transform T_ow (4x4)."""
    with open(box_path, 'r') as f:
        lines = f.readlines()
    # Header on line 0; line 1: position (3), extents (3), quaternion (4).
    values = [float(e) for e in lines[1].strip().split(',')]
    position = values[:3]
    quaternion = values[6:]
    rot_mat = quaternions.quat2mat(quaternion)
    # Compose translation + rotation + unit scale into a homogeneous matrix.
    return affines.compose(position, rot_mat, np.ones(3))
def reproj(K_homo, pose, points3d_homo):
    """Project homogeneous 3D points into the image.

    Args:
        K_homo: (3, 4) homogeneous intrinsic matrix.
        pose: (4, 4) world-to-camera transform.
        points3d_homo: (4, n) homogeneous 3D points.
    Returns:
        (n, 2) pixel coordinates.
    """
    assert K_homo.shape == (3, 4)
    assert pose.shape == (4, 4)
    assert points3d_homo.shape[0] == 4  # [4, n]

    projected = K_homo @ pose @ points3d_homo
    # Perspective divide by the depth row, then keep (x, y) per point.
    projected = projected / projected[2:]
    return projected[:2, :].T  # [n, 2]
def parse_video(paths, downsample_rate=5, bbox_3d_homo=None, hw=512):
    """Extract frames from the capture video and crop them around the object.

    Every `downsample_rate`-th frame is cropped to the bounding box of the
    precomputed reprojected 3D box, resized to hw x hw, and written to
    color_dir; the adjusted intrinsics, the crop-to-full transform M, and the
    2D box in crop coordinates are saved alongside. Frames without a
    reprojected box file (rejected in data_process_anno) are skipped.
    """
    orig_intrin_file = paths['final_intrin_file']
    K, K_homo = data_utils.get_K(orig_intrin_file)
    intrin_dir = paths['intrin_dir']
    cap = cv2.VideoCapture(paths['video_file'])
    index = 0
    while True:
        ret, image = cap.read()
        if not ret:
            # End of video.
            break
        if index % downsample_rate == 0:
            img_name = osp.join(paths['color_dir'], '{}.png'.format(index))
            save_intrin_path = osp.join(intrin_dir, '{}.txt'.format(index))
            reproj_box3d_file = osp.join(paths['reproj_box_dir'], '{}.txt'.format(index))
            # NOTE(review): this `continue` also skips the `index += 1` at the
            # bottom of the loop, so after a missing box file the counter no
            # longer advances with the video frames -- confirm this is intended.
            if not osp.isfile(reproj_box3d_file):
                continue
            reproj_box3d = np.loadtxt(osp.join(paths['reproj_box_dir'], '{}.txt'.format(index))).astype(int)
            # Axis-aligned 2D bounding box of the reprojected 3D corners.
            x0, y0 = reproj_box3d.min(0)
            x1, y1 = reproj_box3d.max(0)
            box = np.array([x0, y0, x1, y1])
            resize_shape = np.array([y1 - y0, x1 - x0])
            # First crop to the box at native resolution...
            K_crop, K_crop_homo = data_utils.get_K_crop_resize(box, K, resize_shape)
            image_crop, trans1 = data_utils.get_image_crop_resize(image, box, resize_shape)
            # ...then resize the crop to hw x hw, adjusting K again.
            box_new = np.array([0, 0, x1-x0, y1-y0])
            resize_shape = np.array([hw, hw])
            K_crop, K_crop_homo = data_utils.get_K_crop_resize(box_new, K_crop, resize_shape)
            image_crop, trans2 = data_utils.get_image_crop_resize(image_crop, box_new, resize_shape)
            # Save the crop -> full-image transform for later un-cropping.
            trans_full_to_crop = trans2 @ trans1
            trans_crop_to_full = np.linalg.inv(trans_full_to_crop)
            np.savetxt(osp.join(paths['M_dir'], '{}.txt'.format(index)), trans_crop_to_full)
            # Re-project the 3D box with the cropped intrinsics to get the 2D
            # box in crop coordinates.
            pose = np.loadtxt(osp.join(paths['out_pose_dir'], '{}.txt'.format(index)))
            reproj_crop = reproj(K_crop_homo, pose, bbox_3d_homo.T)
            x0_new, y0_new = reproj_crop.min(0)
            x1_new, y1_new = reproj_crop.max(0)
            box_new = np.array([x0_new, y0_new, x1_new, y1_new])
            np.savetxt(osp.join(paths['out_box_dir'], '{}.txt'.format(index)), box_new)
            cv2.imwrite(img_name, image_crop)
            # cv2.imwrite(out_mask_file, mask_crop)
            full_img_dir = paths['color_dir'] + '_full'
            Path(full_img_dir).mkdir(exist_ok=True, parents=True)
            cv2.imwrite(osp.join(full_img_dir, '{}.png'.format(index)), image)
            np.savetxt(save_intrin_path, K_crop)
        index += 1
    cap.release()
def data_process_anno(data_dir, downsample_rate=1, hw=512):
    """Convert a raw ARKit annotate capture into per-frame training data.

    Averages the per-frame intrinsics into intrinsics.txt, writes the 3D box
    corners, converts each AR pose into an object-to-camera pose with its
    reprojected box, then calls parse_video to extract/crop frames. Finally
    symlinks intrin/poses as intrin_ba/poses_ba so demo videos can be used
    without bundle adjustment.
    """
    paths = get_arkit_default_path(data_dir)
    # Average the per-frame intrinsics (columns 2..5 of Frames.txt) into one
    # fx/fy/cx/cy set for the whole sequence.
    with open(paths['orig_intrin_file'], 'r') as f:
        lines = [l.strip() for l in f.readlines() if len(l) > 0 and l[0] != '#']
    eles = [[float(e) for e in l.split(',')] for l in lines]
    data = np.array(eles)
    fx, fy, cx, cy = np.average(data, axis=0)[2:]
    with open(paths['final_intrin_file'], 'w') as f:
        f.write('fx: {0}\nfy: {1}\ncx: {2}\ncy: {3}'.format(fx, fy, cx, cy))

    bbox_3d, bbox_3d_homo = get_bbox3d(paths['box_path'])
    np.savetxt(paths['out_3D_box_dir'], bbox_3d)

    K_homo = np.array([
        [fx, 0, cx, 0],
        [0, fy, cy, 0],
        [0, 0,  1, 0]
    ])
    with open(paths['pose_file'], 'r') as f:
        lines = [l.strip() for l in f.readlines()]
        index = 0
        for line in tqdm.tqdm(lines):
            # Skip blank lines and comment lines.
            if len(line) == 0 or line[0] == '#':
                continue
            if index % downsample_rate == 0:
                eles = line.split(',')
                data = [float(e) for e in eles]
                # ARposes.txt row: timestamp, position (3), quaternion (4).
                position = data[1:4]
                quaternion = data[4:]
                rot_mat = quaternions.quat2mat(quaternion)
                # Flip from ARKit's camera convention to the CV convention
                # (y down, z forward).
                rot_mat = rot_mat @ np.array([
                    [1,  0,  0],
                    [0, -1,  0],
                    [0,  0, -1]
                ])
                T_ow = parse_box(paths['box_path'])
                T_cw = affines.compose(position, rot_mat, np.ones(3))
                T_wc = np.linalg.inv(T_cw)
                # Object-to-camera pose for this frame.
                T_oc = T_wc @ T_ow
                pose_save_path = osp.join(paths['out_pose_dir'], '{}.txt'.format(index))
                box_save_path = osp.join(paths['reproj_box_dir'], '{}.txt'.format(index))
                reproj_box3d = reproj(K_homo, T_oc, bbox_3d_homo.T)
                x0, y0 = reproj_box3d.min(0)
                x1, y1 = reproj_box3d.max(0)
                # Reject frames whose box reprojects far outside the image.
                # NOTE(review): this `continue` skips the `index += 1` below,
                # desynchronizing the counter from the pose lines -- confirm
                # it matches the skipping logic in parse_video.
                if x0 < -1000 or y0 < -1000 or x1 > 3000 or y1 > 3000:
                    continue
                np.savetxt(pose_save_path, T_oc)
                np.savetxt(box_save_path, reproj_box3d)
            index += 1
    parse_video(paths, downsample_rate, bbox_3d_homo, hw=hw)

    # Make fake data for demo annotate video without BA:
    if osp.exists(osp.join(osp.dirname(paths['intrin_dir']), 'intrin_ba')):
        os.system(f"rm -rf {osp.join(osp.dirname(paths['intrin_dir']), 'intrin_ba')}")
    os.system(f"ln -s {paths['intrin_dir']} {osp.join(osp.dirname(paths['intrin_dir']), 'intrin_ba')}")
    if osp.exists(osp.join(osp.dirname(paths['out_pose_dir']), 'poses_ba')):
        os.system(f"rm -rf {osp.join(osp.dirname(paths['out_pose_dir']), 'poses_ba')}")
    os.system(f"ln -s {paths['out_pose_dir']} {osp.join(osp.dirname(paths['out_pose_dir']), 'poses_ba')}")
14,612 | import os
import cv2
import tqdm
import numpy as np
import os.path as osp
import argparse
from pathlib import Path
from transforms3d import affines, quaternions
from src.utils import data_utils
def get_test_default_path(data_dir):
    """Prepare the directory layout for a test (unannotated) sequence.

    Removes stale annotation files (Box.txt, ARposes.txt) left over from a
    previous run, creates the full-resolution color directory, and returns
    the relevant paths.
    """
    # box_file = osp.join(data_dir, 'RefinedBox.txt')
    box_file = osp.join(data_dir, 'Box.txt')
    if osp.exists(box_file):
        os.remove(box_file)  # stale GT box: test sequences carry no annotation

    color_full_dir = osp.join(data_dir, 'color_full')
    Path(color_full_dir).mkdir(parents=True, exist_ok=True)

    pose_file = osp.join(data_dir, 'ARposes.txt')
    if osp.exists(pose_file):
        os.remove(pose_file)  # stale GT poses

    return {
        'video_file': osp.join(data_dir, 'Frames.m4v'),
        'color_full_dir': color_full_dir,
        'orig_intrin_file': osp.join(data_dir, 'Frames.txt'),
        'final_intrin_file': osp.join(data_dir, 'intrinsics.txt'),
    }
def data_process_test(data_dir, downsample_rate=1):
    """Prepare a test sequence: average intrinsics and extract video frames.

    Writes the sequence-wide intrinsics to intrinsics.txt and dumps every
    `downsample_rate`-th frame of Frames.m4v as a PNG into color_full.
    """
    paths = get_test_default_path(data_dir)

    # Parse intrinsic: average fx/fy/cx/cy (columns 2..5 of Frames.txt)
    # over all frames of the capture.
    with open(paths['orig_intrin_file'], 'r') as f:
        lines = [l.strip() for l in f.readlines() if len(l) > 0 and l[0] != '#']
    eles = [[float(e) for e in l.split(',')] for l in lines]
    data = np.array(eles)
    fx, fy, cx, cy = np.average(data, axis=0)[2:]
    with open(paths['final_intrin_file'], 'w') as f:
        f.write('fx: {0}\nfy: {1}\ncx: {2}\ncy: {3}'.format(fx, fy, cx, cy))

    # Parse video: write every downsample_rate-th frame, named by frame index.
    cap = cv2.VideoCapture(paths['video_file'])
    index = 0
    while True:
        ret, image = cap.read()
        if not ret:
            # End of video.
            break
        if index % downsample_rate == 0:
            full_img_dir = paths['color_full_dir']
            cv2.imwrite(osp.join(full_img_dir, '{}.png'.format(index)), image)
        index += 1
    cap.release()
14,613 | import os
import cv2
import tqdm
import numpy as np
import os.path as osp
import argparse
from pathlib import Path
from transforms3d import affines, quaternions
from src.utils import data_utils
def parse_args():
    """Parse command-line arguments for the scanned-object processing script."""
    arg_parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    arg_parser.add_argument("--scanned_object_path", type=str, required=True)
    return arg_parser.parse_args()
14,614 | import glob
import torch
import hydra
from tqdm import tqdm
import os
import os.path as osp
import numpy as np
import natsort
from loguru import logger
from torch.utils.data import DataLoader
from src.utils import data_utils, path_utils, eval_utils, vis_utils
from src.utils.model_io import load_network
from src.local_feature_2D_detector import LocalFeatureObjectDetector
from src.tracker.ba_tracker import BATracker
from pytorch_lightning import seed_everything
def inference_core(cfg, data_root, seq_dir, sfm_model_dir):
    """Inference & visualize.

    Runs OnePose pose estimation over one image sequence: per frame the object
    is localized (2D feature matching on the first frame or after a failure,
    otherwise reprojection of the previous pose), keypoints are matched 2D-3D
    with the GATsSPG matcher, the pose is solved with RANSAC PnP, and a
    visualization image is written. Finally all images are packed into a video.
    """
    from src.datasets.normalized_dataset import NormalizedDataset
    from src.sfm.extract_features import confs
    if cfg.use_tracking:
        logger.warning("The tracking module is under development. "
                       "Running OnePose inference without tracking instead.")
        tracker = BATracker(cfg)
        # a keyframe update is attempted every `track_interval` frames
        track_interval = 5
    else:
        logger.info("Running OnePose inference without tracking")
    # Load models and prepare data:
    matching_model, extractor_model = load_model(cfg)
    matching_2D_model = load_2D_matching_model(cfg)
    img_lists, paths = get_default_paths(cfg, data_root, seq_dir, sfm_model_dir)
    # sort images numerically by frame id ('2.png' before '10.png');
    # a plain string sort would misorder them
    im_ids = [int(osp.basename(i).replace('.png', '')) for i in img_lists]
    im_ids.sort()
    img_lists = [osp.join(osp.dirname(img_lists[0]), f'{im_id}.png') for im_id in im_ids]
    K, _ = data_utils.get_K(paths["intrin_full_path"])
    box3d_path = path_utils.get_3d_box_path(data_root)
    bbox3d = np.loadtxt(box3d_path)
    local_feature_obj_detector = LocalFeatureObjectDetector(
        extractor_model,
        matching_2D_model,
        sfm_ws_dir=paths["sfm_ws_dir"],
        output_results=False,
        detect_save_dir=paths["vis_detector_dir"],
    )
    dataset = NormalizedDataset(
        img_lists, confs[cfg.network.detection]["preprocessing"]
    )
    loader = DataLoader(dataset, num_workers=1)
    # Prepare 3D features:
    num_leaf = cfg.num_leaf
    avg_data = np.load(paths["avg_anno_3d_path"])
    clt_data = np.load(paths["clt_anno_3d_path"])
    idxs = np.load(paths["idxs_path"])
    keypoints3d = torch.Tensor(clt_data["keypoints3d"]).cuda()
    num_3d = keypoints3d.shape[0]
    # load average 3D features:
    avg_descriptors3d, _ = data_utils.pad_features3d_random(
        avg_data["descriptors3d"], avg_data["scores3d"], num_3d
    )
    # load corresponding 2D features of each 3D point:
    clt_descriptors, _ = data_utils.build_features3d_leaves(
        clt_data["descriptors3d"], clt_data["scores3d"], idxs, num_3d, num_leaf
    )
    pred_poses = {}  # {id:[pred_pose, inliers]}
    for id, data in enumerate(tqdm(loader)):
        with torch.no_grad():
            img_path = data["path"][0]
            inp = data["image"].cuda()
            # Detect object:
            if id == 0:
                # Detect object by 2D local feature matching for the first frame:
                bbox, inp_crop, K_crop = local_feature_obj_detector.detect(inp, img_path, K)
            else:
                # Use 3D bbox and previous frame's pose to yield current frame 2D bbox:
                previous_frame_pose, inliers = pred_poses[id - 1]
                if len(inliers) < 8:
                    # Consider previous pose estimation failed, reuse local feature object detector:
                    bbox, inp_crop, K_crop = local_feature_obj_detector.detect(
                        inp, img_path, K
                    )
                else:
                    (
                        bbox,
                        inp_crop,
                        K_crop,
                    ) = local_feature_obj_detector.previous_pose_detect(
                        img_path, K, previous_frame_pose, bbox3d
                    )
            # Detect query image(cropped) keypoints and extract descriptors:
            pred_detection = extractor_model(inp_crop)
            pred_detection = {k: v[0].cpu().numpy() for k, v in pred_detection.items()}
            # 2D-3D matching by GATsSPG:
            inp_data = pack_data(
                avg_descriptors3d,
                clt_descriptors,
                keypoints3d,
                pred_detection,
                data["size"],
            )
            pred, _ = matching_model(inp_data)
            # matches0[i] is the matched 3D index for 2D keypoint i, -1 = unmatched
            matches = pred["matches0"].detach().cpu().numpy()
            valid = matches > -1
            kpts2d = pred_detection["keypoints"]
            kpts3d = inp_data["keypoints3d"][0].detach().cpu().numpy()
            confidence = pred["matching_scores0"].detach().cpu().numpy()
            mkpts2d, mkpts3d, mconf = (
                kpts2d[valid],
                kpts3d[matches[valid]],
                confidence[valid],
            )
            # Estimate object pose by 2D-3D correspondences:
            pose_pred, pose_pred_homo, inliers = eval_utils.ransac_PnP(
                K_crop, mkpts2d, mkpts3d, scale=1000
            )
            # Store previous estimated poses:
            pred_poses[id] = [pose_pred, inliers]
            image_crop = np.asarray((inp_crop * 255).squeeze().cpu().numpy(), dtype=np.uint8)
            if cfg.use_tracking:
                frame_dict = {
                    'im_path': image_crop,
                    'kpt_pred': pred_detection,
                    'pose_pred': pose_pred_homo,
                    'pose_gt': pose_pred_homo,
                    'K': K_crop,
                    'K_crop': K_crop,
                    'data': data
                }
                use_update = id % track_interval == 0
                if use_update:
                    # Keep only PnP-inlier correspondences for the keyframe.
                    mkpts3d_db_inlier = mkpts3d[inliers.flatten()]
                    mkpts2d_q_inlier = mkpts2d[inliers.flatten()]
                    n_kpt = kpts2d.shape[0]
                    valid_query_id = np.where(valid != False)[0][inliers.flatten()]
                    # 10086 acts as a sentinel for "no 3D point" — presumably an
                    # arbitrary out-of-range marker; confirm in BATracker.
                    kpts3d_full = np.ones([n_kpt, 3]) * 10086
                    kpts3d_full[valid_query_id] = mkpts3d_db_inlier
                    kpt3d_ids = matches[valid][inliers.flatten()]
                    kf_dict = {
                        'im_path': image_crop,
                        'kpt_pred': pred_detection,
                        'valid_mask': valid,
                        'mkpts2d': mkpts2d_q_inlier,
                        'mkpts3d': mkpts3d_db_inlier,
                        'kpt3d_full': kpts3d_full,
                        'inliers': inliers,
                        'kpt3d_ids': kpt3d_ids,
                        'valid_query_id': valid_query_id,
                        'pose_pred': pose_pred_homo,
                        'pose_gt': pose_pred_homo,
                        'K': K_crop
                    }
                    need_update = not tracker.update_kf(kf_dict)
                if id == 0:
                    tracker.add_kf(kf_dict)
                    # NOTE(review): incrementing the loop variable has no effect
                    # beyond this iteration (enumerate rebinds it) — confirm intent.
                    id += 1
                    pose_opt = pose_pred_homo
                else:
                    pose_init, pose_opt, ba_log = tracker.track(frame_dict, auto_mode=False)
            else:
                pose_opt = pose_pred_homo
            # Visualize:
            vis_utils.save_demo_image(
                pose_opt,
                K,
                image_path=img_path,
                box3d_path=box3d_path,
                draw_box=len(inliers) > 6,
                save_path=osp.join(paths["vis_box_dir"], f"{id}.jpg"),
            )
    # Output video to visualize estimated poses:
    vis_utils.make_video(paths["vis_box_dir"], paths["demo_video_path"])
def inference(cfg):
    """Run `inference_core` for every (data_root, sfm_model_dir) pair in cfg.

    Each `data_dir` entry is a space-separated string: the first token is the
    data root, the remaining tokens are sequence names under that root.
    """
    data_dirs = cfg.input.data_dirs
    sfm_model_dirs = cfg.input.sfm_model_dirs
    # Allow a single pair to be given as plain strings.
    if isinstance(data_dirs, str) and isinstance(sfm_model_dirs, str):
        data_dirs, sfm_model_dirs = [data_dirs], [sfm_model_dirs]
    pairs = zip(data_dirs, sfm_model_dirs)
    for data_dir, sfm_model_dir in tqdm(pairs, total=len(data_dirs)):
        root, *seq_names = data_dir.split(" ")
        for seq_name in seq_names:
            seq_dir = osp.join(root, seq_name)
            logger.info(f"Eval {seq_dir}")
            inference_core(cfg, root, seq_dir, sfm_model_dir)
14,615 | from typing import Optional
import fire
import torch
import tqdm
import transformers
from train_ppo import LlamaRewardModel
class LlamaRewardModel(LlamaForCausalLM):
    """Llama backbone extended with a scalar reward head for RLHF scoring."""

    def __init__(self, config, opt, tokenizer):
        super().__init__(config)
        self.opt = opt
        self.tokenizer = tokenizer
        # Linear projection from the final hidden state to one scalar reward.
        self.reward_head = torch.nn.Linear(config.hidden_size, 1, bias=False)

    def forward(self, decoder_input, only_last=True):
        """Score token ids in `decoder_input`.

        Returns a 1-tuple with either one reward per sequence (only_last=True)
        or one reward per token position.
        """
        # Non-pad positions form the attention mask.
        attention_mask = decoder_input.ne(self.tokenizer.pad_token_id)
        output = self.model.forward(
            input_ids=decoder_input,
            attention_mask=attention_mask,
            return_dict=True,
            use_cache=False
        )
        if only_last:
            # Reward taken at the final position — assumes left padding so that
            # index -1 is the last real token; TODO confirm at the call sites.
            logits = self.reward_head(output.last_hidden_state[:, -1, :]).squeeze(-1)
        else:
            # Per-token rewards over the whole sequence.
            logits = self.reward_head(output.last_hidden_state).squeeze(-1)
        return (logits,)
The provided code snippet includes necessary dependencies for implementing the `make_diff` function. Write a Python function `def make_diff( path_raw: str, path_tuned: str, path_diff: str, device="cpu", # "cuda" or "cpu" )` to solve the following problem:
Make the weight diff. This function is given to present full transparency of how the weight diff was created. Run: python weight_diff.py make_diff --path_raw decapoda-research/llama-7b-hf --path_tuned <your_path_tuned> --path_diff <your_path_diff>
Here is the function:
def make_diff(
    path_raw: str, path_tuned: str, path_diff: str, device="cpu",  # "cuda" or "cpu"
):
    """Make the weight diff.

    This function is given to present full transparency of how the weight diff
    was created. Only transformer-layer tensors ('layers' in the key) are
    diffed against the raw base model; other tensors are stored unchanged.

    Run:
        python weight_diff.py make_diff --path_raw decapoda-research/llama-7b-hf --path_tuned <your_path_tuned> --path_diff <your_path_diff>
    """
    target_device = torch.device(device)
    model_tuned = LlamaRewardModel.from_pretrained(
        path_tuned,
        opt=None,
        tokenizer=None,
        device_map={"": target_device},
        torch_dtype=torch.float32,
        low_cpu_mem_usage=True,
    )
    # zh: decapoda-research/llama-7b-hf
    # en:
    model_raw = transformers.AutoModelForCausalLM.from_pretrained(
        path_raw,
        device_map={"": target_device},
        torch_dtype=torch.float32,
        low_cpu_mem_usage=True,
    )
    tuned_sd = model_tuned.state_dict()
    raw_sd = model_raw.state_dict()
    # Log every parameter name, then the naive checksum that `recover` re-checks.
    for key in tqdm.tqdm(tuned_sd):
        print(key)
    check_allsum = sum(tuned_sd[key].sum() for key in tuned_sd)  # 49954.0859375
    print(f'check sum is {check_allsum}')
    # Turn tuned layer weights into (tuned - raw) deltas, in place.
    for key in tqdm.tqdm(tuned_sd):
        if 'layers' in key:
            tuned_sd[key].add_(-raw_sd[key])
    model_tuned.save_pretrained(path_diff)
14,616 | from typing import Optional
import fire
import torch
import tqdm
import transformers
from train_ppo import LlamaRewardModel
class LlamaRewardModel(LlamaForCausalLM):
    """Llama model with an added scalar reward head (RLHF reward scoring)."""

    def __init__(self, config, opt, tokenizer):
        super().__init__(config)
        self.opt = opt
        self.tokenizer = tokenizer
        # Maps each hidden state to a single scalar reward value.
        self.reward_head = torch.nn.Linear(config.hidden_size, 1, bias=False)

    def forward(self, decoder_input, only_last=True):
        """Return a 1-tuple of rewards: per sequence if `only_last`, else per token."""
        # Mask out pad tokens when attending.
        attention_mask = decoder_input.ne(self.tokenizer.pad_token_id)
        output = self.model.forward(
            input_ids=decoder_input,
            attention_mask=attention_mask,
            return_dict=True,
            use_cache=False
        )
        if only_last:
            # NOTE(review): indexing position -1 assumes left padding; with right
            # padding this would score a pad position — confirm with callers.
            logits = self.reward_head(output.last_hidden_state[:, -1, :]).squeeze(-1)
        else:
            logits = self.reward_head(output.last_hidden_state).squeeze(-1)
        return (logits,)
The provided code snippet includes necessary dependencies for implementing the `recover` function. Write a Python function `def recover( path_raw, path_diff, path_tuned: Optional[str] = None, device="cpu", check_integrity_naively=True, )` to solve the following problem:
Recover the original weights from the released weight diff. This function is given for you to run. Things to do before running this: 1. Convert Meta's released weights into huggingface format. Follow this guide: https://huggingface.co/docs/transformers/main/model_doc/llama 2. Make sure you cloned the released weight diff into your local machine. The weight diff is located at: https://huggingface.co/tatsu-lab/alpaca-7b/tree/main 3. Run this function with the correct paths. E.g., python weight_diff.py recover --path_raw <path_to_step_1_dir> --path_diff <path_to_step_2_dir> Additional notes: - If things run too slowly, and you have an 80G GPU lying around, let GPU go brrr by setting `--device "cuda"`. - If you want to save the recovered weights, set `--path_tuned <your_path_tuned>`. Next time you can load the recovered weights directly from `<your_path_tuned>`.
Here is the function:
def recover(
    path_raw,
    path_diff,
    path_tuned: Optional[str] = None,
    device="cpu",
    check_integrity_naively=True,
):
    """Recover the original weights from the released weight diff.
    This function is given for you to run.
    Things to do before running this:
    1. Convert Meta's released weights into huggingface format. Follow this guide:
        https://huggingface.co/docs/transformers/main/model_doc/llama
    2. Make sure you cloned the released weight diff into your local machine. The weight diff is located at:
        https://huggingface.co/tatsu-lab/alpaca-7b/tree/main
    3. Run this function with the correct paths. E.g.,
        python weight_diff.py recover --path_raw <path_to_step_1_dir> --path_diff <path_to_step_2_dir>
    Additional notes:
    - If things run too slowly, and you have an 80G GPU lying around, let GPU go brrr by setting `--device "cuda"`.
    - If you want to save the recovered weights, set `--path_tuned <your_path_tuned>`.
        Next time you can load the recovered weights directly from `<your_path_tuned>`.
    """
    model_raw = transformers.AutoModelForCausalLM.from_pretrained(
        path_raw,
        device_map={"": torch.device(device)},
        torch_dtype=torch.float32,
        low_cpu_mem_usage=True,
    )
    model_recovered = LlamaRewardModel.from_pretrained(
        path_diff,
        opt=None,
        tokenizer=None,
        device_map={"": torch.device(device)},
        torch_dtype=torch.float32,
        low_cpu_mem_usage=True,
    )
    state_dict_recovered = model_recovered.state_dict()
    state_dict_raw = model_raw.state_dict()
    # Log parameter names for visibility.
    for key in tqdm.tqdm(state_dict_recovered):
        print(key)
    # Undo the diff in place: only transformer-layer tensors were diffed by
    # make_diff, so only those get the raw weights added back.
    for key in tqdm.tqdm(state_dict_recovered):
        if 'layers' in key:
            state_dict_recovered[key].add_(state_dict_raw[key])
    if check_integrity_naively:
        # This is not a rigorous, cryptographically strong integrity check :)
        # 49954.0859375 is the checksum printed by make_diff for the tuned model.
        allsum = sum(state_dict_recovered[key].sum() for key in state_dict_recovered)
        assert torch.allclose(
            allsum, torch.full_like(allsum, fill_value=49954.0859375), rtol=1e-5, atol=1e-8
        ), "Naive integrity check failed. This could imply that some of the checkpoint files are corrupted."
        print('Check successfully.')
    if path_tuned is not None:
        model_recovered.save_pretrained(path_tuned)
    return model_recovered
14,617 | import argparse
def parse_args():
    """Parse command-line arguments for MOSS-RLHF PPO training.

    Returns the argparse Namespace with path, optimizer, PPO, sampling and
    language options. Defaults mirror the paper's PPO-max configuration.
    """
    parser = argparse.ArgumentParser(description='MOSS-RLHF @Fudan NLP Group')
    # Path
    parser.add_argument('--model_save_path', type=str, default='', help='checkpoint path, used for save model and training')
    parser.add_argument('--policy_model_path', type=str, default='', help='policy model and reference model path')
    parser.add_argument('--critic_model_path', type=str, default='', help='critic model and reward model path')
    parser.add_argument('--tokenizer_name_or_path', type=str, default='/huggingface_models/open-chinese-llama-7b', help='tokenizer name or path')
    parser.add_argument('--data_path', type=str, default='./data', help='dataset for training and validation')
    parser.add_argument('--logdir', type=str, default=None, help='path to save tensorboard logs')
    # Training
    parser.add_argument('--lr', type=float, default=5e-7, help='learning rate of policy model')
    parser.add_argument('--critic_lr', type=float, default=15e-7, help='learning rate of critic model')
    parser.add_argument('--seed', type=int, default=42, help='seed')
    parser.add_argument('--batch_size', type=int, default=32, help='training batch size, *NOT* for sampling from env')
    parser.add_argument('--train_steps', type=int, default=5000, help='train steps')
    parser.add_argument('--warmup_steps', type=int, default=500, help='warmup steps')
    parser.add_argument('--save_per_step', type=int, default=100, help='save ckpt per steps')
    parser.add_argument('--beta1', type=float, default=0.9, help='adam')
    parser.add_argument('--beta2', type=float, default=0.95, help='adam')
    parser.add_argument('--eps', type=float, default=1e-6, help='optimizer')
    parser.add_argument('--num_workers', type=int, default=1, help='dataloader')
    parser.add_argument('--num_prefetch', type=int, default=32, help='dataloader')
    parser.add_argument('--maxlen_prompt', type=int, default=2048, help='max len for training, including model prompt and response')
    parser.add_argument('--gradient_checkpoint', action='store_true', help='deepspeed')
    # PPO in LLMs
    parser.add_argument('--num_rollouts', type=int, default=128, help='nums of samples in current replay buffer')
    parser.add_argument('--rollout_batch_size', type=int, default=32, help='batch size of sampling from env')
    parser.add_argument('--ppo_pretrain_data_path', type=str, default='', help='dataset folder path for pertrain loss of step3: rlhf')
    parser.add_argument('--ppo_pretrain_data_type', type=str, default='sft', choices=['sft', 'pretrain'], help='dataset folder path for pertrain loss of step3: rlhf')
    parser.add_argument('--ppo_pretrain_batch_size_ratio', type=int, default=1, help='ppo batch size ratio')
    parser.add_argument('--ppo_pretrain_loss_weight', type=float, default=0., help='add pretrain loss in PPO training: ppo-rtx')
    parser.add_argument('--kl_penalty_weight', type=float, default=0.02, help='kl penalty')
    parser.add_argument('--advantage_clip', type=float, default=0.5, help='clip advantage')
    parser.add_argument('--vf_loss_weight', type=float, default=1., help='vf loss weight')
    parser.add_argument('--entropy_loss_weight', type=float, default=0., help='entropy loss weight')
    parser.add_argument('--reward_clip', type=float, default=10., help='reward clip')
    parser.add_argument('--entropy_clip', type=float, default=35., help='entropy loss clip')
    parser.add_argument('--pg_clip', type=float, default=0.2, help='pg loss clip')
    parser.add_argument('--value_clip', type=float, default=0.2, help='value clip for critic model')
    parser.add_argument('--gamma', type=float, default=1., help='GAE in PPO')
    parser.add_argument('--lam', type=float, default=0.95, help='GAE in PPO')
    # Trick and method options for PPO
    parser.add_argument('--use_reward_clip', action='store_true', help='use reward clip')
    parser.add_argument('--use_reward_scaling', action='store_true', help='use reward scaling')
    parser.add_argument('--use_reward_norm', action='store_true', help='user reward norm')
    parser.add_argument('--use_critic_loss_clip', action='store_true', help='use critic loss clip')
    parser.add_argument('--use_policy_loss_clip', action='store_true', help='use policy loss clip')
    parser.add_argument('--use_advantage_norm', action='store_true', help='use advantage norm')
    parser.add_argument('--use_advantage_clip', action='store_true', help='use advantage clip')
    parser.add_argument('--use_ppo_pretrain_loss', action='store_true', help='use ppo pretrain loss')
    parser.add_argument('--use_entropy_loss', action='store_true', help='use ppo entropy loss')
    # Sample from env
    parser.add_argument('--maxlen_res', type=int, default=128, help='max len for model response')
    parser.add_argument('--temperature', type=float, default=0.8, help='temperature')
    parser.add_argument('--repetition_penalty', type=float, default=1.1, help='repetition penalty')
    parser.add_argument('--topp', type=float, default=0.9, help='nucleus sampling')
    # Option for language
    parser.add_argument('--lang', type=str, choices=['zh', 'en'], help='special prompt choice for PPO-max-zh or PPO-max-en')
    opt = parser.parse_args()
    return opt
14,618 | import torch
import torch.nn.functional as F
import logging
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from typing import Tuple, Callable
accelerator = None
def setup_accelerator():
    """Return the process-wide Accelerator, creating it on first call.

    Lazily initializes the module-level `accelerator` singleton with
    split_batches=True; subsequent calls return the cached instance.
    """
    global accelerator
    if accelerator is not None:
        return accelerator
    accelerator = Accelerator(split_batches=True)
    return accelerator
14,619 | import torch
import torch.nn.functional as F
import logging
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from typing import Tuple, Callable
accelerator = None
def synchronize_if_distributed():
    # Barrier across all processes, but only when running distributed;
    # a single-process run skips the wait entirely.
    # NOTE(review): assumes setup_accelerator() has already populated the
    # module-level `accelerator` — calling this earlier raises AttributeError.
    if accelerator.use_distributed:
        accelerator.wait_for_everyone()
14,620 | import torch
import torch.nn.functional as F
import logging
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from typing import Tuple, Callable
accelerator = None
def synchronize_forward_on_stage3(done: bool, fake_forward_fn: Callable, **kwargs):
    # synchronize to avoid deadlock on deepspeed stage3. do not call this if zero-3 is disabled
    # https://github.com/microsoft/DeepSpeed/issues/860
    #
    # Under ZeRO-3 every forward is a collective (parameters are gathered across
    # ranks), so a rank that finished early must keep issuing dummy forwards
    # until every other rank reports done. Ranks reduce a flag: 0 = "I am done",
    # 1 = "I am still working"; the reduced sum is 0 only when all are done.
    if done:
        # Finished rank: run fake forwards until the reduced flag drops to ~0.
        sync = 1.
        while sync > 1e-5:
            fake_forward_fn(**kwargs)
            sync = torch.tensor(0., device=accelerator.device)
            sync = accelerator.reduce(sync).item()
    else:
        # Working rank: contribute 1 so finished ranks keep pairing forwards.
        sync = torch.tensor(1., device=accelerator.device)
        sync = accelerator.reduce(sync)
14,621 | import torch
import torch.nn.functional as F
import logging
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from typing import Tuple, Callable
accelerator = None
def to_cuda(batch):
    """Move every tensor value in `batch` to the accelerator device, in place.

    Non-tensor values are left untouched. Uses non_blocking transfers so host
    to device copies can overlap with compute.
    """
    target = accelerator.device
    for key in batch:
        value = batch[key]
        if isinstance(value, torch.Tensor):
            batch[key] = value.to(target, non_blocking=True)
14,622 | import torch
import torch.nn.functional as F
import logging
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from typing import Tuple, Callable
def get_eval_ds_config(offload=None, stage=3):
    """Build a DeepSpeed config dict for an evaluation-only engine.

    Reuses the micro batch size from the active Accelerate DeepSpeed plugin;
    `offload` moves ZeRO parameters to CPU, `stage` selects the ZeRO stage.
    """
    plugin = AcceleratorState().deepspeed_plugin
    micro_bsz = plugin.deepspeed_config['train_micro_batch_size_per_gpu']
    param_device = "cpu" if offload else "none"
    config = {
        "train_micro_batch_size_per_gpu": micro_bsz,
        "steps_per_print": 10,
        "zero_optimization": {
            "stage": stage,
            "stage3_param_persistence_threshold": 1e4,
            "offload_param": {
                "device": param_device
            },
        },
        "bf16": {
            "enabled": True
        },
        "gradient_clipping": 1.0,
        "prescale_gradients": False,
        "wall_clock_breakdown": False,
    }
    return config
14,623 | import torch
import torch.nn.functional as F
import logging
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from typing import Tuple, Callable
def get_global_statistics(accelerator, xs: torch.Tensor, mask=None, device='cpu') -> Tuple[float, float, int]:
    """
    Computes element-wise mean and variance of the tensor across processes
    https://github.com/microsoft/LMOps/blob/cde1fb1ef4608a7ac5bf00675fa3e94b1d960abb/minillm/minillm/utils.py#L108
    """
    xs = xs.to(accelerator.device)
    # Global sum and element count (masked count when a mask is given),
    # reduced over all processes in one collective.
    sum_and_count = torch.tensor([xs.sum(), (xs.numel() if mask is None else mask.sum())], device=xs.device)
    sum_and_count = accelerator.reduce(sum_and_count)
    global_sum, count = sum_and_count
    global_mean = global_sum / count
    # Sum of squared deviations from the *global* mean, reduced again.
    sum_var = torch.sum(((xs - global_mean) ** 2).mul(1 if mask is None else mask))
    sum_var = accelerator.reduce(sum_var)
    global_var = sum_var / count
    return global_mean.to(device), global_var.to(device), count.to(device)
The provided code snippet includes necessary dependencies for implementing the `whiten` function. Write a Python function `def whiten(xs: torch.Tensor, mask: torch.BoolTensor, shift_mean=True, accelerator=None) -> torch.Tensor` to solve the following problem:
Whitens values
Here is the function:
def whiten(xs: torch.Tensor, mask: torch.BoolTensor, shift_mean=True, accelerator=None) -> torch.Tensor:
    """
    Whitens values: normalize `xs` to unit variance over positions selected by `mask`.

    Args:
        xs: values to whiten.
        mask: same shape as `xs`; zero/False positions are excluded from the
            mean/variance statistics (all positions are still rescaled).
        shift_mean: if True the result is zero-mean; if False the original
            mean is added back after scaling.
        accelerator: optional Accelerator; in distributed runs the statistics
            are computed globally across processes.

    Returns:
        Tensor with the same shape as `xs`.
    """
    # Idiom fix: `is not None` instead of `!= None` (PEP 8; also robust to
    # objects overriding __eq__).
    if accelerator is not None and accelerator.use_distributed:
        mean, var, _ = get_global_statistics(accelerator, xs, mask=mask, device=accelerator.device)
    else:
        mean = xs.sum() / mask.sum()
        var = torch.sum(((xs - mean) ** 2).mul(mask)) / mask.sum()
    # 1e-6 guards against division by zero variance.
    whitened = (xs - mean) * torch.rsqrt(var + 1e-6)
    if not shift_mean:
        whitened += mean
    return whitened
14,624 | import torch
import torch.nn.functional as F
import logging
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from typing import Tuple, Callable
The provided code snippet includes necessary dependencies for implementing the `top_p_logits` function. Write a Python function `def top_p_logits(logits, topp=0.9, filter_value=0, min_topk=1)` to solve the following problem:
Filter a distribution of logits using nucleus (top-p) filtering https://github.com/OpenLMLab/MOSS/blob/e088f438d1a95d424c6dffef0d73134ebe62cb72/models_jittor/generation.py#L146
Here is the function:
def top_p_logits(logits, topp=0.9, filter_value=0, min_topk=1):
    """
    Filter a distribution of logits using nucleus (top-p) filtering
    https://github.com/OpenLMLab/MOSS/blob/e088f438d1a95d424c6dffef0d73134ebe62cb72/models_jittor/generation.py#L146
    """
    filtered = logits.clone()
    if topp > 0:
        sorted_vals, sorted_idx = torch.sort(logits, dim=-1, descending=True)
        # A token is dropped once the cumulative mass *before* it already
        # reaches topp (so the token crossing the threshold is kept).
        cutoff = (sorted_vals.cumsum(dim=-1) - sorted_vals) >= topp
        # Always keep at least `min_topk` highest-scoring tokens.
        cutoff[:, :min_topk] = False
        # Map the sorted-order mask back to vocabulary order.
        vocab_mask = torch.zeros_like(cutoff).to(torch.bool).scatter_(dim=-1, index=sorted_idx, src=cutoff)
        filtered[vocab_mask] = filter_value
        # Renormalize the surviving mass.
        filtered.div_(filtered.sum(dim=-1, keepdim=True))
    return filtered
14,625 | import torch
import torch.nn.functional as F
import logging
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from typing import Tuple, Callable
The provided code snippet includes necessary dependencies for implementing the `logprobs_from_logits` function. Write a Python function `def logprobs_from_logits(logits, labels)` to solve the following problem:
See: https://github.com/pytorch/pytorch/issues/563#issuecomment-330103591
Here is the function:
def logprobs_from_logits(logits, labels):
    """
    See: https://github.com/pytorch/pytorch/issues/563#issuecomment-330103591
    """
    # Log-probabilities over the vocabulary, then pick the label's entry.
    log_probs = F.log_softmax(logits, dim=-1)
    picked = log_probs.gather(dim=-1, index=labels.unsqueeze(-1))
    return picked.squeeze(-1)
14,626 | import torch
import torch.nn.functional as F
import logging
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from typing import Tuple, Callable
The provided code snippet includes necessary dependencies for implementing the `get_category_distribution_entropy` function. Write a Python function `def get_category_distribution_entropy(bsz, logits)` to solve the following problem:
Compute the entropy of the categorical distribution defined over the last dimension of the logits tensor, returning one entropy value per position reshaped to (bsz, -1).
Here is the function:
def get_category_distribution_entropy(bsz, logits):
    """
    Compute category distribution entropy
    """
    # Flatten to (bsz * seq, vocab), compute per-position entropy, restore shape.
    flat_logits = logits.reshape(-1, logits.size(-1))
    dist = torch.distributions.categorical.Categorical(logits=flat_logits)
    return dist.entropy().reshape(bsz, -1)
14,627 | import torch
import torch.nn.functional as F
import logging
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from typing import Tuple, Callable
The provided code snippet includes necessary dependencies for implementing the `pad_sequences` function. Write a Python function `def pad_sequences(seqs, pad_value, padding='right', pad_to: int=None)` to solve the following problem:
Pad a batch of sequences to a common length, padding on the left or right, optionally to a fixed target length.
Here is the function:
def pad_sequences(seqs, pad_value, padding='right', pad_to: int = None):
    """
    Padding sequence to the same length.

    Args:
        seqs: list of token-id lists.
        pad_value: value used to pad.
        padding: 'right' appends padding, 'left' prepends it.
        pad_to: pad to this fixed length instead of the batch maximum.

    Returns:
        List of lists, all of equal length.

    Raises:
        ValueError: if `padding` is neither 'right' nor 'left'.
    """
    max_len = max(len(seq) for seq in seqs) if pad_to is None else pad_to
    if padding == 'right':
        return [seq + [pad_value] * (max_len - len(seq)) for seq in seqs]
    if padding == 'left':
        return [[pad_value] * (max_len - len(seq)) + seq for seq in seqs]
    # Bug fix: the original `assert ValueError` was a no-op (asserting a truthy
    # class object) and then crashed with NameError; raise explicitly instead.
    raise ValueError(f"Unknown padding side: {padding!r}")
14,628 | import argparse
def parse_args(*args):
    """Parse command-line arguments for MOSS-RLHF reward model training.

    NOTE(review): the `*args` parameter is accepted but unused — parsing always
    reads sys.argv; confirm whether callers intend to pass an argv list.
    """
    parser = argparse.ArgumentParser(description='MOSS-RLHF Reward Model @Fudan NLP Group')
    # training settings
    parser.add_argument('--seed', type=int, default=42, help='seed')
    parser.add_argument('--lr', type=float, default=5e-6, help='learning rate of reward model')
    parser.add_argument('--batch_size', type=int, default=8, help='training batch size for single GPU')
    parser.add_argument('--gradient_checkpoint', action='store_true', help='deepspeed')
    parser.add_argument('--reward_lm_loss_factor', type=float, default=0., help='calculate lm loss on rm model')
    parser.add_argument('--warmup_steps', type=int, default=500, help='warmup steps')
    parser.add_argument('--train_steps', type=int, default=10000, help='train steps')
    parser.add_argument('--fp32_loss', action='store_true', help='use fp32 to calculate cross-entropy loss, enable when numeric stability problem occurs')
    parser.add_argument('--save_per_step', type=int, default=200, help='save ckpt and save validation tensorboard per steps')
    parser.add_argument('--print_interval', type=int, default=5, help='print training state and save training tensorboard per steps')
    parser.add_argument('--validation_metric', type=str, default='loss', help='metric to select the best model')
    # Optimizer , Scheduler and Dataloader
    parser.add_argument('--beta1', type=float, default=0.9, help='adam')
    parser.add_argument('--beta2', type=float, default=0.95, help='adam')
    parser.add_argument('--eps', type=float, default=1e-6, help='optimizer')
    parser.add_argument('--num_prefetch', type=int, default=32, help='dataloader')
    parser.add_argument('--num_workers', type=int, default=1, help='dataloader')
    parser.add_argument('--weight_decay', type=float, default=0., help='l2 weight decay')
    # Path
    parser.add_argument('--data_path', type=str, default='./data', help='dataset for training and validation')
    parser.add_argument('--init_checkpoint_model', type=str, default=None, help='checkpoint used to initialize the model, used for fine-tuning')
    parser.add_argument('--logdir', type=str, default=None, help='path to save tensorboard logs')
    parser.add_argument('--model_save_path', type=str, default='./outputs/', help='checkpoint path, used for save model and training')
    parser.add_argument('--hf_model_name_or_path', type=str, default='meta-llama/Llama-2-7b-hf', help='Hugging model name used to load tokenizer, configs and pretained models')
    # LLM settings
    parser.add_argument('--context_truncate', type=int, default=2048, help='max length for history')
    parser.add_argument('--delimiter', type=str, default='\n', help='delimiter to seperate dialog history')
    args = parser.parse_args()
    return args
14,629 | import os
import random
import logging
import torch
import json
import copy
from typing import List, Dict, Any, Tuple
from transformers.models.llama.tokenization_llama import LlamaTokenizer
from torch.utils.data import get_worker_info, IterableDataset
from utils import print_rank_0, pad_sequences
def get_human_prompt(opt):
    """Role tag marking a human turn; Chinese runs use the MOSS-style marker."""
    if opt.lang == 'zh':
        return "<|Human|>"
    return "Human:"
def get_assistant_prompt(opt):
    """Role tag marking an assistant turn; Chinese runs use the MOSS marker."""
    if opt.lang == 'zh':
        return "<|MOSS|>"
    return "Assistant:"
def print_rank_0(info, only_on_cuda0=False):
    # Log only from the main process; with only_on_cuda0 each distinct message
    # is logged at most once (deduplicated via the history set).
    # NOTE(review): relies on module-level globals `accelerator` and
    # `histroy_logs` (sic — typo preserved) defined elsewhere; confirm both are
    # initialized before the first call. Also note that with the default
    # only_on_cuda0=False the condition below is False and nothing is logged —
    # verify that is the intended behavior for default calls.
    if accelerator and not accelerator.is_main_process:
        return
    if only_on_cuda0 and info not in histroy_logs:
        histroy_logs.add(info)
        logging.info(info)
    return
def get_tokenizer(opt):
    """Load the Llama tokenizer and normalize its special tokens.

    Pad and unk are both mapped to '<unk>' with id 0; for Chinese ('zh') the
    human/assistant role markers are registered as additional special tokens.
    """
    print_rank_0(f"Loading tokenizer from huggingface: {opt.tokenizer_name_or_path}...", only_on_cuda0=True)
    tokenizer = LlamaTokenizer.from_pretrained(opt.tokenizer_name_or_path, trust_remote_code=True)
    tokenizer.bos_token = '<s>'
    tokenizer.eos_token = '</s>'
    # Reuse '<unk>' (id 0) as the pad token so padding never collides with
    # a real vocabulary entry.
    tokenizer.pad_token = '<unk>'
    tokenizer.pad_token_id = 0
    tokenizer.unk_token = tokenizer.pad_token
    tokenizer.unk_token_id = tokenizer.pad_token_id
    # only zh need special tokens
    if opt.lang == 'zh':
        tokenizer.add_special_tokens({"additional_special_tokens": [get_human_prompt(opt), get_assistant_prompt(opt)]})
    print_rank_0(f"Llama tokenizer size: {tokenizer.vocab_size}", only_on_cuda0=True)
    print_rank_0(f"Llama tokenizer pad token: {tokenizer.pad_token}, pad_token_id: {tokenizer.pad_token_id}", only_on_cuda0=True)
    print_rank_0(f"Llama tokenizer. special token: {tokenizer.special_tokens_map}", only_on_cuda0=True)
    return tokenizer
14,630 | import os
import random
import logging
import torch
import json
import copy
from typing import List, Dict, Any, Tuple
from transformers.models.llama.tokenization_llama import LlamaTokenizer
from torch.utils.data import get_worker_info, IterableDataset
from utils import print_rank_0, pad_sequences
def get_human_prompt(opt):
    """Return the human-turn role marker for the configured language."""
    return "Human:" if opt.lang != 'zh' else "<|Human|>"
def get_assistant_prompt(opt):
    """Return the assistant-turn role marker for the configured language."""
    return "Assistant:" if opt.lang != 'zh' else "<|MOSS|>"
def get_special_prompt(i, opt):
    """Speaker tag for turn index *i*: even turns are human, odd are assistant."""
    if i % 2 == 0:
        return get_human_prompt(opt)
    return get_assistant_prompt(opt)
14,631 | import os
import random
import logging
import torch
import json
import copy
from typing import List, Dict, Any, Tuple
from transformers.models.llama.tokenization_llama import LlamaTokenizer
from torch.utils.data import get_worker_info, IterableDataset
from utils import print_rank_0, pad_sequences
def get_human_prompt(opt):
    """Human role marker; Chinese runs use the MOSS-style special token."""
    markers = {'zh': "<|Human|>"}
    return markers.get(opt.lang, "Human:")
def get_assistant_prompt(opt):
    """Assistant role marker; Chinese runs use the MOSS-style special token."""
    markers = {'zh': "<|MOSS|>"}
    return markers.get(opt.lang, "Assistant:")
def get_model_prompt(context: List[str], eos_token="</s>", opt=None):
    """Join a dialog history and append the role tag of the *next* speaker.

    The next speaker is inferred from whoever spoke last: a human turn is
    followed by the assistant tag and vice versa.
    """
    human_prompt = get_human_prompt(opt)
    assistant_prompt = get_assistant_prompt(opt)
    last_turn = context[-1]
    if last_turn.startswith(human_prompt):
        next_speaker = assistant_prompt
    elif last_turn.startswith(assistant_prompt):
        next_speaker = human_prompt
    else:
        # Last turn carries neither role tag — the history is malformed.
        raise ValueError
    joined = eos_token.join(context)
    return f'{joined}{eos_token}{next_speaker}'
14,632 | from typing import Optional
import fire
import torch
import tqdm
import transformers
from train_ppo import LlamaRewardModel, Llama
class Llama(LlamaForCausalLM):
    """Causal Llama wrapper adding padding-masked incremental decoding and
    sampling-based generation (nucleus sampling + repetition penalty)."""

    def __init__(self, config, opt, tokenizer):
        super().__init__(config)
        # opt supplies sampling defaults: maxlen_res, temperature, repetition_penalty, topp.
        self.opt = opt
        self.tokenizer = tokenizer

    def forward(self, decoder_input, incr_state=None):
        """One decoding step.

        decoder_input: (bsz, seq_len) token ids; pad positions are masked out.
        incr_state: past key/values; when present only the newest token is fed.
        Returns (logits, new_incr_state).
        """
        attention_mask = decoder_input.ne(self.tokenizer.pad_token_id)
        if incr_state is not None:
            # With a KV cache only the most recent token needs to be embedded.
            decoder_input = decoder_input[:, -1:]

        output = super().forward(
            input_ids=decoder_input,
            attention_mask=attention_mask,
            past_key_values=incr_state,
            return_dict=True,
            use_cache=not self.training
        )

        logits = output.logits
        new_incr_states = output.past_key_values

        return logits, new_incr_states

    def generate(self, batch, **kwargs):
        """
        Generate response

        Samples up to ``maxlen_res`` tokens per input with temperature scaling,
        an optional repetition penalty and nucleus (top-p) sampling.
        Returns (best_preds_scores, preds_scores) where each entry is a
        (length-normalized log-prob score, token_id_list) tuple.
        """
        # Sampling hyper-parameters; kwargs override defaults from self.opt.
        maxlen_res = kwargs.pop('maxlen_res', self.opt.maxlen_res)
        temperature = kwargs.pop('temperature', self.opt.temperature)
        repetition_penalty = kwargs.pop('repetition_penalty', self.opt.repetition_penalty)
        topp = kwargs.pop('topp', self.opt.topp)

        decoder_input: torch.LongTensor = batch['text_vec'] # (bsz, ...)
        assert decoder_input[:, -1].ne(self.tokenizer.pad_token_id).all(), 'Last token should not be a padding token (you can use left padding instead).'

        dev = decoder_input.device
        bsz = decoder_input.size(0)

        scores = torch.zeros((bsz,), device=dev, dtype=torch.float16)  # cumulative log-probs
        done = torch.zeros((bsz,), device=dev).to(torch.bool)  # per-sample finished flags

        inds = torch.arange(bsz).to(dev).unsqueeze(1).view(-1)
        decoder_input = torch.index_select(decoder_input, 0, inds)
        init_length = decoder_input.size(1)  # prompt length; generated tokens start here

        incr_state = None
        for _token in range(maxlen_res):
            if done.all():
                break
            score, incr_state, *_ = self.forward(decoder_input, incr_state)
            score = score.half()

            # now score is bs, len, vocab_size
            score = score[:, -1, :]

            # calculate repetition penalty (only over previously generated tokens)
            if repetition_penalty > 1.:
                penalty_tokens = decoder_input[:, init_length:]
                penalty_scores = torch.gather(score, dim=1, index=penalty_tokens)
                penalty_scores = torch.where(penalty_scores < 0., penalty_scores * repetition_penalty, penalty_scores / repetition_penalty)
                score = score.scatter_(dim=1, index=penalty_tokens, src=penalty_scores)

            # nucleus sampling
            score = torch.softmax(score.div(temperature), dim=-1)
            probs = top_p_logits(score, topp=topp, filter_value=0)
            tok_ids = torch.multinomial(probs, 1)[:, 0]
            hyp_ids = torch.arange(probs.size(0), device=dev)
            # Accumulate log-prob only for still-active samples (~done zeroes finished ones).
            scores = scores + probs[hyp_ids, tok_ids].log() * ~done

            # Finished samples keep emitting pad tokens.
            tok_ids = torch.where(done, self.tokenizer.pad_token_id, tok_ids)
            decoder_input = torch.cat((decoder_input, tok_ids.unsqueeze(-1)), dim=-1)
            done = done | tok_ids.eq(self.tokenizer.eos_token_id)

            incr_state = self._reorder_cache(incr_state, hyp_ids)

        # get all finalized candidates for each sample
        decoder_input = decoder_input[:, init_length:]
        decoder_input = decoder_input.view(bsz, -1)
        scores = scores.view(bsz, )

        lengths = decoder_input.ne(self.tokenizer.pad_token_id).sum(dim=-1)

        # Length normalization (exponent 1.0 == plain average log-prob per token).
        length_penalty = torch.pow(lengths, 1.0)
        scores /= length_penalty

        preds_scores = []
        for i in range(bsz):
            seq: torch.LongTensor = decoder_input[i, :lengths[i, ]]
            res_scores = (float(scores[i, ]), seq.tolist())
            preds_scores.append([res_scores])

        best_preds_scores = [preds[0] for preds in preds_scores]
        return best_preds_scores, preds_scores
class LlamaRewardModel(LlamaForCausalLM):
    """Llama backbone with a scalar reward head over its hidden states."""

    def __init__(self, config, opt, tokenizer):
        super().__init__(config)
        self.opt = opt
        self.tokenizer = tokenizer
        # Projects a hidden state vector to a single scalar reward.
        self.reward_head = torch.nn.Linear(config.hidden_size, 1, bias=False)

    def forward(self, decoder_input, only_last=True):
        """Score token sequences.

        Returns a 1-tuple of reward logits: shape (bsz,) when *only_last*
        is True (reward of the final position), else (bsz, seq_len).
        """
        pad_mask = decoder_input.ne(self.tokenizer.pad_token_id)
        backbone_out = self.model.forward(
            input_ids=decoder_input,
            attention_mask=pad_mask,
            return_dict=True,
            use_cache=False
        )
        hidden = backbone_out.last_hidden_state
        if only_last:
            hidden = hidden[:, -1, :]
        logits = self.reward_head(hidden).squeeze(-1)
        return (logits,)
The provided code snippet includes necessary dependencies for implementing the `make_diff` function. Write a Python function `def make_diff( path_raw: str, path_tuned: str, path_diff: str, model_type: str = None, device="cpu", # "cuda" or "cpu" )` to solve the following problem:
Make the weight diff. This function is given to present full transparency of how the weight diff was created. Run: python weight_diff.py make_diff --path_raw decapoda-research/llama-7b-hf --path_tuned <your_path_tuned> --path_diff <your_path_diff> --model_type <model_type>
Here is the function:
def make_diff(
    path_raw: str, path_tuned: str, path_diff: str, model_type: str = None, device="cpu", # "cuda" or "cpu"
):
    """Make the weight diff.

    This function is given to present full transparency of how the weight diff was created.

    Args:
        path_raw: path (or hub id) of the raw/base Llama checkpoint.
        path_tuned: path of the fine-tuned checkpoint to diff against the base.
        path_diff: output directory for the diffed weights.
        model_type: one of 'reward', 'sft' or 'policy'.
        device: "cuda" or "cpu".

    Raises:
        ValueError: if ``model_type`` is not one of the supported values.

    Run:
        python weight_diff.py make_diff --path_raw decapoda-research/llama-7b-hf --path_tuned <your_path_tuned> --path_diff <your_path_diff> --model_type <model_type>
    """
    # Shared from_pretrained kwargs for all model flavours.
    load_kwargs = dict(
        device_map={"": torch.device(device)},
        torch_dtype=torch.float32,
        low_cpu_mem_usage=True,
    )
    if model_type == 'reward':
        model_tuned = LlamaRewardModel.from_pretrained(
            path_tuned, opt=None, tokenizer=None, **load_kwargs
        )
    elif model_type in ('sft', 'policy'):
        # SFT and policy checkpoints share the plain causal-LM layout.
        model_tuned = Llama.from_pretrained(
            path_tuned, opt=None, tokenizer=None, **load_kwargs
        )
    else:
        # Previously an unsupported value crashed later with NameError on
        # `model_tuned`; fail fast with an actionable message instead.
        raise ValueError(
            f"Unsupported model_type: {model_type!r}; expected 'reward', 'sft' or 'policy'."
        )

    model_raw = transformers.AutoModelForCausalLM.from_pretrained(path_raw, **load_kwargs)

    state_dict_tuned = model_tuned.state_dict()
    state_dict_raw = model_raw.state_dict()
    for key in tqdm.tqdm(state_dict_tuned):
        print(key)

    # Naive integrity checksum of the tuned weights; `recover` compares
    # the recovered checkpoint against these reference values.
    # en-reward-model 50810.703125
    # en-sft-model 50874.84765625
    # en-policy-model
    check_allsum = sum(state_dict_tuned[key].sum() for key in state_dict_tuned)
    print(f'check sum is {check_allsum}')

    # Only transformer-layer weights are diffed; embeddings/heads are stored as-is.
    for key in tqdm.tqdm(state_dict_tuned):
        if 'layers' in key:
            state_dict_tuned[key].add_(-state_dict_raw[key])

    model_tuned.save_pretrained(path_diff)
14,633 | from typing import Optional
import fire
import torch
import tqdm
import transformers
from train_ppo import LlamaRewardModel, Llama
class Llama(LlamaForCausalLM):
    """Causal Llama wrapper adding padding-masked incremental decoding and
    sampling-based generation (nucleus sampling + repetition penalty)."""

    def __init__(self, config, opt, tokenizer):
        super().__init__(config)
        # opt supplies sampling defaults: maxlen_res, temperature, repetition_penalty, topp.
        self.opt = opt
        self.tokenizer = tokenizer

    def forward(self, decoder_input, incr_state=None):
        """One decoding step.

        decoder_input: (bsz, seq_len) token ids; pad positions are masked out.
        incr_state: past key/values; when present only the newest token is fed.
        Returns (logits, new_incr_state).
        """
        attention_mask = decoder_input.ne(self.tokenizer.pad_token_id)
        if incr_state is not None:
            # With a KV cache only the most recent token needs to be embedded.
            decoder_input = decoder_input[:, -1:]

        output = super().forward(
            input_ids=decoder_input,
            attention_mask=attention_mask,
            past_key_values=incr_state,
            return_dict=True,
            use_cache=not self.training
        )

        logits = output.logits
        new_incr_states = output.past_key_values

        return logits, new_incr_states

    def generate(self, batch, **kwargs):
        """
        Generate response

        Samples up to ``maxlen_res`` tokens per input with temperature scaling,
        an optional repetition penalty and nucleus (top-p) sampling.
        Returns (best_preds_scores, preds_scores) where each entry is a
        (length-normalized log-prob score, token_id_list) tuple.
        """
        # Sampling hyper-parameters; kwargs override defaults from self.opt.
        maxlen_res = kwargs.pop('maxlen_res', self.opt.maxlen_res)
        temperature = kwargs.pop('temperature', self.opt.temperature)
        repetition_penalty = kwargs.pop('repetition_penalty', self.opt.repetition_penalty)
        topp = kwargs.pop('topp', self.opt.topp)

        decoder_input: torch.LongTensor = batch['text_vec'] # (bsz, ...)
        assert decoder_input[:, -1].ne(self.tokenizer.pad_token_id).all(), 'Last token should not be a padding token (you can use left padding instead).'

        dev = decoder_input.device
        bsz = decoder_input.size(0)

        scores = torch.zeros((bsz,), device=dev, dtype=torch.float16)  # cumulative log-probs
        done = torch.zeros((bsz,), device=dev).to(torch.bool)  # per-sample finished flags

        inds = torch.arange(bsz).to(dev).unsqueeze(1).view(-1)
        decoder_input = torch.index_select(decoder_input, 0, inds)
        init_length = decoder_input.size(1)  # prompt length; generated tokens start here

        incr_state = None
        for _token in range(maxlen_res):
            if done.all():
                break
            score, incr_state, *_ = self.forward(decoder_input, incr_state)
            score = score.half()

            # now score is bs, len, vocab_size
            score = score[:, -1, :]

            # calculate repetition penalty (only over previously generated tokens)
            if repetition_penalty > 1.:
                penalty_tokens = decoder_input[:, init_length:]
                penalty_scores = torch.gather(score, dim=1, index=penalty_tokens)
                penalty_scores = torch.where(penalty_scores < 0., penalty_scores * repetition_penalty, penalty_scores / repetition_penalty)
                score = score.scatter_(dim=1, index=penalty_tokens, src=penalty_scores)

            # nucleus sampling
            score = torch.softmax(score.div(temperature), dim=-1)
            probs = top_p_logits(score, topp=topp, filter_value=0)
            tok_ids = torch.multinomial(probs, 1)[:, 0]
            hyp_ids = torch.arange(probs.size(0), device=dev)
            # Accumulate log-prob only for still-active samples (~done zeroes finished ones).
            scores = scores + probs[hyp_ids, tok_ids].log() * ~done

            # Finished samples keep emitting pad tokens.
            tok_ids = torch.where(done, self.tokenizer.pad_token_id, tok_ids)
            decoder_input = torch.cat((decoder_input, tok_ids.unsqueeze(-1)), dim=-1)
            done = done | tok_ids.eq(self.tokenizer.eos_token_id)

            incr_state = self._reorder_cache(incr_state, hyp_ids)

        # get all finalized candidates for each sample
        decoder_input = decoder_input[:, init_length:]
        decoder_input = decoder_input.view(bsz, -1)
        scores = scores.view(bsz, )

        lengths = decoder_input.ne(self.tokenizer.pad_token_id).sum(dim=-1)

        # Length normalization (exponent 1.0 == plain average log-prob per token).
        length_penalty = torch.pow(lengths, 1.0)
        scores /= length_penalty

        preds_scores = []
        for i in range(bsz):
            seq: torch.LongTensor = decoder_input[i, :lengths[i, ]]
            res_scores = (float(scores[i, ]), seq.tolist())
            preds_scores.append([res_scores])

        best_preds_scores = [preds[0] for preds in preds_scores]
        return best_preds_scores, preds_scores
class LlamaRewardModel(LlamaForCausalLM):
    """Llama backbone with a scalar reward head over its hidden states."""

    def __init__(self, config, opt, tokenizer):
        super().__init__(config)
        self.opt = opt
        self.tokenizer = tokenizer
        # Projects a hidden state vector to a single scalar reward.
        self.reward_head = torch.nn.Linear(config.hidden_size, 1, bias=False)

    def forward(self, decoder_input, only_last=True):
        """Score token sequences.

        Returns a 1-tuple of reward logits: shape (bsz,) when *only_last*
        is True (reward of the final position), else (bsz, seq_len).
        """
        pad_mask = decoder_input.ne(self.tokenizer.pad_token_id)
        backbone_out = self.model.forward(
            input_ids=decoder_input,
            attention_mask=pad_mask,
            return_dict=True,
            use_cache=False
        )
        hidden = backbone_out.last_hidden_state
        if only_last:
            hidden = hidden[:, -1, :]
        logits = self.reward_head(hidden).squeeze(-1)
        return (logits,)
The provided code snippet includes necessary dependencies for implementing the `recover` function. Write a Python function `def recover( path_raw, path_diff, path_tuned: Optional[str] = None, device="cpu", model_type = None, check_integrity_naively=True, )` to solve the following problem:
Recover the original weights from the released weight diff. This function is given for you to run. Things to do before running this: 1. Convert Meta's released weights into huggingface format. Follow this guide: https://huggingface.co/docs/transformers/main/model_doc/llama 2. Make sure you cloned the released weight diff into your local machine. The weight diff is located at: https://huggingface.co/tatsu-lab/alpaca-7b/tree/main 3. Run this function with the correct paths. E.g., python weight_diff.py recover --path_raw <path_to_step_1_dir> --path_diff <path_to_step_2_dir> Additional notes: - If things run too slowly, and you have an 80G GPU lying around, let GPU go brrr by setting `--device "cuda"`. - If you want to save the recovered weights, set `--path_tuned <your_path_tuned>`. Next time you can load the recovered weights directly from `<your_path_tuned>`.
Here is the function:
def recover(
    path_raw,
    path_diff,
    path_tuned: Optional[str] = None,
    device="cpu",
    model_type = None,
    check_integrity_naively=True,
):
    """Recover the original weights from the released weight diff.

    This function is given for you to run.

    Things to do before running this:
    1. Convert Meta's released weights into huggingface format. Follow this guide:
        https://huggingface.co/docs/transformers/main/model_doc/llama
    2. Make sure you cloned the released weight diff into your local machine. The weight diff is located at:
        https://huggingface.co/tatsu-lab/alpaca-7b/tree/main
    3. Run this function with the correct paths. E.g.,
        python weight_diff.py recover --path_raw <path_to_step_1_dir> --path_diff <path_to_step_2_dir>

    Additional notes:
    - If things run too slowly, and you have an 80G GPU lying around, let GPU go brrr by setting `--device "cuda"`.
    - If you want to save the recovered weights, set `--path_tuned <your_path_tuned>`.
        Next time you can load the recovered weights directly from `<your_path_tuned>`.

    Raises:
        ValueError: if ``model_type`` is not 'reward', 'sft' or 'policy'.
    """
    # Shared from_pretrained kwargs for all model flavours.
    load_kwargs = dict(
        device_map={"": torch.device(device)},
        torch_dtype=torch.float32,
        low_cpu_mem_usage=True,
    )
    model_raw = transformers.AutoModelForCausalLM.from_pretrained(path_raw, **load_kwargs)

    # Pick the model class matching the diffed checkpoint, and the expected
    # post-recovery checksum printed by make_diff for that checkpoint.
    if model_type == 'reward':
        model_recovered = LlamaRewardModel.from_pretrained(
            path_diff, opt=None, tokenizer=None, **load_kwargs
        )
        fill_value = 50810.703125
    elif model_type == 'sft':
        model_recovered = Llama.from_pretrained(
            path_diff, opt=None, tokenizer=None, **load_kwargs
        )
        fill_value = 50874.84765625
    elif model_type == 'policy':
        model_recovered = Llama.from_pretrained(
            path_diff, opt=None, tokenizer=None, **load_kwargs
        )
        fill_value = 0
    else:
        # Previously an unsupported value crashed later with NameError on
        # `model_recovered`/`fill_value`; fail fast instead.
        raise ValueError(
            f"Unsupported model_type: {model_type!r}; expected 'reward', 'sft' or 'policy'."
        )

    state_dict_recovered = model_recovered.state_dict()
    state_dict_raw = model_raw.state_dict()
    for key in tqdm.tqdm(state_dict_recovered):
        print(key)

    # Only transformer-layer weights were diffed; add the base weights back.
    for key in tqdm.tqdm(state_dict_recovered):
        if 'layers' in key:
            state_dict_recovered[key].add_(state_dict_raw[key])

    if check_integrity_naively:
        # This is not a rigorous, cryptographically strong integrity check :)
        allsum = sum(state_dict_recovered[key].sum() for key in state_dict_recovered)
        assert torch.allclose(
            allsum, torch.full_like(allsum, fill_value=fill_value), rtol=1e-5, atol=1e-8
        ), "Naive integrity check failed. This could imply that some of the checkpoint files are corrupted."
        print('Check successfully.')

    if path_tuned is not None:
        model_recovered.save_pretrained(path_tuned, max_shard_size="10GB")

    return model_recovered
14,634 | from torch.utils.data import get_worker_info, IterableDataset
from transformers.models.llama.tokenization_llama import LlamaTokenizer
from typing import Dict, Any, List, Tuple, Union, Generator
import json, logging, torch, random
import os
from utils import *
def get_human_prompt():
    """Marker that prefixes a human turn."""
    return "Human:"


def get_assistant_prompt():
    """Marker that prefixes an assistant turn."""
    return "Assistant:"


def get_separate_prompt(i: int):
    """Marker for turn *i*: even turns belong to the human, odd to the assistant."""
    if i % 2 == 0:
        return get_human_prompt()
    return get_assistant_prompt()
14,635 | from torch.utils.data import get_worker_info, IterableDataset
from transformers.models.llama.tokenization_llama import LlamaTokenizer
from typing import Dict, Any, List, Tuple, Union, Generator
import json, logging, torch, random
import os
from utils import *
def get_tokenizer(opt):
    """Load the Llama tokenizer and pin its special tokens (pad reuses <unk>, id 0)."""
    tokenizer_name_or_path = opt.hf_model_name_or_path
    print_rank_0(f"Loading tokenizer from huggingface: {tokenizer_name_or_path}...", only_on_cuda0=True)
    tokenizer = LlamaTokenizer.from_pretrained(tokenizer_name_or_path, trust_remote_code=True)

    # Normalize special tokens: <unk> (id 0) doubles as the padding token.
    tokenizer.bos_token, tokenizer.eos_token = '<s>', '</s>'
    tokenizer.pad_token, tokenizer.pad_token_id = '<unk>', 0
    tokenizer.unk_token = tokenizer.pad_token
    tokenizer.unk_token_id = tokenizer.pad_token_id

    for message in (
        f"Llama tokenizer size: {tokenizer.vocab_size}",
        f"Llama tokenizer pad token: {tokenizer.pad_token}, pad_token_id: {tokenizer.pad_token_id}",
        f"Llama tokenizer. special token: {tokenizer.special_tokens_map}",
    ):
        print_rank_0(message, only_on_cuda0=True)
    return tokenizer
14,636 | import argparse
import logging
import math
import os
import random
import time
from pathlib import Path
from threading import Thread
from warnings import warn
import numpy as np
import torch.distributed as dist
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.optim.lr_scheduler as lr_scheduler
import torch.utils.data
import yaml
from torch.cuda import amp
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
import test
from models.experimental import attempt_load
from models.yolo import Model
from utils.autoanchor import check_anchors
from utils.face_datasets import create_dataloader
from utils.general import labels_to_class_weights, increment_path, labels_to_image_weights, init_seeds, \
fitness, strip_optimizer, get_latest_run, check_dataset, check_file, check_git_status, check_img_size, \
print_mutation, set_logging
from utils.google_utils import attempt_download
from utils.loss import compute_loss
from utils.plots import plot_images, plot_labels, plot_results, plot_evolution
from utils.torch_utils import ModelEMA, select_device, intersect_dicts, torch_distributed_zero_first
logger = logging.getLogger(__name__)
# NOTE(review): the following are signature-only stubs summarizing the external
# dependencies (model builders, dataloaders, losses, plotting and torch utils)
# used by train() below. They have no bodies and are not executable as written;
# the real implementations live in the modules imported at the top of the file.
def attempt_load(weights, map_location=None): # return ensemble
class Model(nn.Module):
    def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None):
    def forward(self, x, augment=False, profile=False):
    def forward_once(self, x, profile=False):
    def _initialize_biases(self, cf=None):
    def _print_biases(self):
    def fuse(self):
    def nms(self, mode=True):
    def autoshape(self):
    def info(self, verbose=False, img_size=640):
def check_anchors(dataset, model, thr=4.0, imgsz=640): # newline
def create_dataloader(path, imgsz, batch_size, stride, opt, hyp=None, augment=False, cache=False, pad=0.0, rect=False,
                      rank=-1, world_size=1, workers=8, image_weights=False, quad=False, prefix=''):
def init_seeds(seed=0):
def check_img_size(img_size, s=32):
def check_dataset(dict):
def labels_to_class_weights(labels, nc=80):
def labels_to_image_weights(labels, nc=80, class_weights=np.ones(80)):
def strip_optimizer(f='weights/best.pt', s=''):
def attempt_download(file, repo='ultralytics/yolov5'):
def compute_loss(p, targets, model):
def plot_images(images, targets, paths=None, fname='images.jpg', names=None, max_size=640, max_subplots=16):
def plot_labels(labels, save_dir=Path(''), loggers=None):
def plot_results(start=0, stop=0, bucket='', id=(), labels=(), save_dir=''):
def torch_distributed_zero_first(local_rank: int):
def intersect_dicts(da, db, exclude=()):
class ModelEMA:
    def __init__(self, model, decay=0.9999, updates=0):
    def update(self, model):
    def update_attr(self, model, include=(), exclude=('process_group', 'reducer')):
def train(hyp, opt, device, tb_writer=None, wandb=None):
    """Train a YOLOv5-face model.

    Args:
        hyp: dict of training hyperparameters (lr schedule, loss gains, warmup,
            augmentation settings, anchor threshold, ...).
        opt: parsed command-line options (paths, epochs, batch sizes, DDP rank, ...).
        device: torch device to train on.
        tb_writer: optional TensorBoard SummaryWriter.
        wandb: optional wandb module for experiment logging.

    Returns:
        The last `results` tuple: (P, R, mAP@.5, mAP@.5-.95, val losses).
    """
    logger.info(f'Hyperparameters {hyp}')
    save_dir, epochs, batch_size, total_batch_size, weights, rank = \
        Path(opt.save_dir), opt.epochs, opt.batch_size, opt.total_batch_size, opt.weights, opt.global_rank

    # Directories
    wdir = save_dir / 'weights'
    wdir.mkdir(parents=True, exist_ok=True)  # make dir
    last = wdir / 'last.pt'
    best = wdir / 'best.pt'
    results_file = save_dir / 'results.txt'

    # Save run settings
    with open(save_dir / 'hyp.yaml', 'w') as f:
        yaml.dump(hyp, f, sort_keys=False)
    with open(save_dir / 'opt.yaml', 'w') as f:
        yaml.dump(vars(opt), f, sort_keys=False)

    # Configure
    plots = not opt.evolve  # create plots
    cuda = device.type != 'cpu'
    init_seeds(2 + rank)  # rank-dependent seed so DDP workers augment differently
    with open(opt.data) as f:
        data_dict = yaml.load(f, Loader=yaml.FullLoader)  # data dict
    with torch_distributed_zero_first(rank):
        check_dataset(data_dict)  # check
    train_path = data_dict['train']
    test_path = data_dict['val']
    nc = 1 if opt.single_cls else int(data_dict['nc'])  # number of classes
    names = ['item'] if opt.single_cls and len(data_dict['names']) != 1 else data_dict['names']  # class names
    assert len(names) == nc, '%g names found for nc=%g dataset in %s' % (len(names), nc, opt.data)  # check

    # Model
    pretrained = weights.endswith('.pt')
    if pretrained:
        with torch_distributed_zero_first(rank):
            attempt_download(weights)  # download if not found locally
        ckpt = torch.load(weights, map_location=device)  # load checkpoint
        if hyp.get('anchors'):
            ckpt['model'].yaml['anchors'] = round(hyp['anchors'])  # force autoanchor
        model = Model(opt.cfg or ckpt['model'].yaml, ch=3, nc=nc).to(device)  # create
        exclude = ['anchor'] if opt.cfg or hyp.get('anchors') else []  # exclude keys
        state_dict = ckpt['model'].float().state_dict()  # to FP32
        state_dict = intersect_dicts(state_dict, model.state_dict(), exclude=exclude)  # intersect
        model.load_state_dict(state_dict, strict=False)  # load
        logger.info('Transferred %g/%g items from %s' % (len(state_dict), len(model.state_dict()), weights))  # report
    else:
        model = Model(opt.cfg, ch=3, nc=nc).to(device)  # create

    # Freeze
    freeze = []  # parameter names to freeze (full or partial)
    for k, v in model.named_parameters():
        v.requires_grad = True  # train all layers
        if any(x in k for x in freeze):
            print('freezing %s' % k)
            v.requires_grad = False

    # Optimizer
    nbs = 64  # nominal batch size
    accumulate = max(round(nbs / total_batch_size), 1)  # accumulate loss before optimizing
    hyp['weight_decay'] *= total_batch_size * accumulate / nbs  # scale weight_decay

    # Three parameter groups: BN weights (no decay), conv weights (decay), biases.
    pg0, pg1, pg2 = [], [], []  # optimizer parameter groups
    for k, v in model.named_modules():
        if hasattr(v, 'bias') and isinstance(v.bias, nn.Parameter):
            pg2.append(v.bias)  # biases
        if isinstance(v, nn.BatchNorm2d):
            pg0.append(v.weight)  # no decay
        elif hasattr(v, 'weight') and isinstance(v.weight, nn.Parameter):
            pg1.append(v.weight)  # apply decay

    if opt.adam:
        optimizer = optim.Adam(pg0, lr=hyp['lr0'], betas=(hyp['momentum'], 0.999))  # adjust beta1 to momentum
    else:
        optimizer = optim.SGD(pg0, lr=hyp['lr0'], momentum=hyp['momentum'], nesterov=True)

    optimizer.add_param_group({'params': pg1, 'weight_decay': hyp['weight_decay']})  # add pg1 with weight_decay
    optimizer.add_param_group({'params': pg2})  # add pg2 (biases)
    logger.info('Optimizer groups: %g .bias, %g conv.weight, %g other' % (len(pg2), len(pg1), len(pg0)))
    del pg0, pg1, pg2

    # Scheduler https://arxiv.org/pdf/1812.01187.pdf
    # https://pytorch.org/docs/stable/_modules/torch/optim/lr_scheduler.html#OneCycleLR
    lf = lambda x: ((1 + math.cos(x * math.pi / epochs)) / 2) * (1 - hyp['lrf']) + hyp['lrf']  # cosine
    scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)
    # plot_lr_scheduler(optimizer, scheduler, epochs)

    # Logging
    if wandb and wandb.run is None:
        opt.hyp = hyp  # add hyperparameters
        wandb_run = wandb.init(config=opt, resume="allow",
                               project='YOLOv5' if opt.project == 'runs/train' else Path(opt.project).stem,
                               name=save_dir.stem,
                               id=ckpt.get('wandb_id') if 'ckpt' in locals() else None)
    loggers = {'wandb': wandb}  # loggers dict

    # Resume
    start_epoch, best_fitness = 0, 0.0
    if pretrained:
        # Optimizer
        if ckpt['optimizer'] is not None:
            optimizer.load_state_dict(ckpt['optimizer'])
            best_fitness = ckpt['best_fitness']

        # Results
        if ckpt.get('training_results') is not None:
            with open(results_file, 'w') as file:
                file.write(ckpt['training_results'])  # write results.txt

        # Epochs
        #start_epoch = ckpt['epoch'] + 1
        # NOTE(review): restoring the epoch counter is commented out, so
        # start_epoch stays 0 and the --resume assert below can never pass;
        # confirm whether this is intentional (restart-from-0 fine-tuning).
        if opt.resume:
            assert start_epoch > 0, '%s training to %g epochs is finished, nothing to resume.' % (weights, epochs)
        if epochs < start_epoch:
            logger.info('%s has been trained for %g epochs. Fine-tuning for %g additional epochs.' %
                        (weights, ckpt['epoch'], epochs))
            epochs += ckpt['epoch']  # finetune additional epochs

        del ckpt, state_dict

    # Image sizes
    gs = int(max(model.stride))  # grid size (max stride)
    imgsz, imgsz_test = [check_img_size(x, gs) for x in opt.img_size]  # verify imgsz are gs-multiples

    # DP mode
    if cuda and rank == -1 and torch.cuda.device_count() > 1:
        model = torch.nn.DataParallel(model)

    # SyncBatchNorm
    if opt.sync_bn and cuda and rank != -1:
        model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device)
        logger.info('Using SyncBatchNorm()')

    # EMA
    ema = ModelEMA(model) if rank in [-1, 0] else None

    # DDP mode
    if cuda and rank != -1:
        model = DDP(model, device_ids=[opt.local_rank], output_device=opt.local_rank)

    # Trainloader
    dataloader, dataset = create_dataloader(train_path, imgsz, batch_size, gs, opt,
                                            hyp=hyp, augment=True, cache=opt.cache_images, rect=opt.rect, rank=rank,
                                            world_size=opt.world_size, workers=opt.workers,
                                            image_weights=opt.image_weights)
    mlc = np.concatenate(dataset.labels, 0)[:, 0].max()  # max label class
    nb = len(dataloader)  # number of batches
    assert mlc < nc, 'Label class %g exceeds nc=%g in %s. Possible class labels are 0-%g' % (mlc, nc, opt.data, nc - 1)

    # Process 0
    if rank in [-1, 0]:
        ema.updates = start_epoch * nb // accumulate  # set EMA updates
        testloader = create_dataloader(test_path, imgsz_test, total_batch_size, gs, opt,  # testloader
                                       hyp=hyp, cache=opt.cache_images and not opt.notest, rect=True,
                                       rank=-1, world_size=opt.world_size, workers=opt.workers, pad=0.5)[0]

        if not opt.resume:
            labels = np.concatenate(dataset.labels, 0)
            c = torch.tensor(labels[:, 0])  # classes
            # cf = torch.bincount(c.long(), minlength=nc) + 1.  # frequency
            # model._initialize_biases(cf.to(device))
            if plots:
                plot_labels(labels, save_dir, loggers)
                if tb_writer:
                    tb_writer.add_histogram('classes', c, 0)

            # Anchors
            if not opt.noautoanchor:
                check_anchors(dataset, model=model, thr=hyp['anchor_t'], imgsz=imgsz)

    # Model parameters
    hyp['cls'] *= nc / 80.  # scale coco-tuned hyp['cls'] to current dataset
    model.nc = nc  # attach number of classes to model
    model.hyp = hyp  # attach hyperparameters to model
    model.gr = 1.0  # iou loss ratio (obj_loss = 1.0 or iou)
    model.class_weights = labels_to_class_weights(dataset.labels, nc).to(device) * nc  # attach class weights
    model.names = names

    # Start training
    t0 = time.time()
    nw = max(round(hyp['warmup_epochs'] * nb), 1000)  # number of warmup iterations, max(3 epochs, 1k iterations)
    # nw = min(nw, (epochs - start_epoch) / 2 * nb)  # limit warmup to < 1/2 of training
    maps = np.zeros(nc)  # mAP per class
    results = (0, 0, 0, 0, 0, 0, 0)  # P, R, mAP@.5, mAP@.5-.95, val_loss(box, obj, cls)
    scheduler.last_epoch = start_epoch - 1  # do not move
    scaler = amp.GradScaler(enabled=cuda)
    logger.info('Image sizes %g train, %g test\n'
                'Using %g dataloader workers\nLogging results to %s\n'
                'Starting training for %g epochs...' % (imgsz, imgsz_test, dataloader.num_workers, save_dir, epochs))
    for epoch in range(start_epoch, epochs):  # epoch ------------------------------------------------------------------
        model.train()

        # Update image weights (optional)
        if opt.image_weights:
            # Generate indices
            if rank in [-1, 0]:
                cw = model.class_weights.cpu().numpy() * (1 - maps) ** 2 / nc  # class weights
                iw = labels_to_image_weights(dataset.labels, nc=nc, class_weights=cw)  # image weights
                dataset.indices = random.choices(range(dataset.n), weights=iw, k=dataset.n)  # rand weighted idx
            # Broadcast if DDP
            if rank != -1:
                indices = (torch.tensor(dataset.indices) if rank == 0 else torch.zeros(dataset.n)).int()
                dist.broadcast(indices, 0)
                if rank != 0:
                    dataset.indices = indices.cpu().numpy()

        # Update mosaic border
        # b = int(random.uniform(0.25 * imgsz, 0.75 * imgsz + gs) // gs * gs)
        # dataset.mosaic_border = [b - imgsz, -b]  # height, width borders

        # 5 running means: box, obj, cls, landmark, total
        mloss = torch.zeros(5, device=device)  # mean losses
        if rank != -1:
            dataloader.sampler.set_epoch(epoch)
        pbar = enumerate(dataloader)
        logger.info(('\n' + '%10s' * 9) % ('Epoch', 'gpu_mem', 'box', 'obj', 'cls', 'landmark', 'total', 'targets', 'img_size'))
        if rank in [-1, 0]:
            pbar = tqdm(pbar, total=nb)  # progress bar
        optimizer.zero_grad()
        for i, (imgs, targets, paths, _) in pbar:  # batch -------------------------------------------------------------
            ni = i + nb * epoch  # number integrated batches (since train start)
            imgs = imgs.to(device, non_blocking=True).float() / 255.0  # uint8 to float32, 0-255 to 0.0-1.0

            # Warmup
            if ni <= nw:
                xi = [0, nw]  # x interp
                # model.gr = np.interp(ni, xi, [0.0, 1.0])  # iou loss ratio (obj_loss = 1.0 or iou)
                accumulate = max(1, np.interp(ni, xi, [1, nbs / total_batch_size]).round())
                for j, x in enumerate(optimizer.param_groups):
                    # bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0
                    x['lr'] = np.interp(ni, xi, [hyp['warmup_bias_lr'] if j == 2 else 0.0, x['initial_lr'] * lf(epoch)])
                    if 'momentum' in x:
                        x['momentum'] = np.interp(ni, xi, [hyp['warmup_momentum'], hyp['momentum']])

            # Multi-scale
            if opt.multi_scale:
                # NOTE(review): float arguments to random.randrange are rejected
                # on newer Python versions — presumably fine on the version this
                # repo targets; verify if upgrading the interpreter.
                sz = random.randrange(imgsz * 0.5, imgsz * 1.5 + gs) // gs * gs  # size
                sf = sz / max(imgs.shape[2:])  # scale factor
                if sf != 1:
                    ns = [math.ceil(x * sf / gs) * gs for x in imgs.shape[2:]]  # new shape (stretched to gs-multiple)
                    imgs = F.interpolate(imgs, size=ns, mode='bilinear', align_corners=False)

            # Forward
            with amp.autocast(enabled=cuda):
                pred = model(imgs)  # forward
                loss, loss_items = compute_loss(pred, targets.to(device), model)  # loss scaled by batch_size
                if rank != -1:
                    loss *= opt.world_size  # gradient averaged between devices in DDP mode

            # Backward
            scaler.scale(loss).backward()

            # Optimize (step only every `accumulate` batches for gradient accumulation)
            if ni % accumulate == 0:
                scaler.step(optimizer)  # optimizer.step
                scaler.update()
                optimizer.zero_grad()
                if ema:
                    ema.update(model)

            # Print
            if rank in [-1, 0]:
                mloss = (mloss * i + loss_items) / (i + 1)  # update mean losses
                mem = '%.3gG' % (torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0)  # (GB)
                s = ('%10s' * 2 + '%10.4g' * 7) % (
                    '%g/%g' % (epoch, epochs - 1), mem, *mloss, targets.shape[0], imgs.shape[-1])
                pbar.set_description(s)

                # Plot
                if plots and ni < 3:
                    f = save_dir / f'train_batch{ni}.jpg'  # filename
                    Thread(target=plot_images, args=(imgs, targets, paths, f), daemon=True).start()
                    # if tb_writer:
                    #     tb_writer.add_image(f, result, dataformats='HWC', global_step=epoch)
                    #     tb_writer.add_graph(model, imgs)  # add model to tensorboard
                elif plots and ni == 3 and wandb:
                    wandb.log({"Mosaics": [wandb.Image(str(x), caption=x.name) for x in save_dir.glob('train*.jpg')]})

            # end batch ------------------------------------------------------------------------------------------------
        # end epoch ----------------------------------------------------------------------------------------------------

        # Scheduler
        lr = [x['lr'] for x in optimizer.param_groups]  # for tensorboard
        scheduler.step()

        # DDP process 0 or single-GPU
        # NOTE(review): evaluation/logging/checkpointing is skipped for the first
        # 20 epochs (`epoch > 20`) — a deviation worth double-checking.
        if rank in [-1, 0] and epoch > 20:
            # mAP
            if ema:
                ema.update_attr(model, include=['yaml', 'nc', 'hyp', 'gr', 'names', 'stride', 'class_weights'])
            final_epoch = epoch + 1 == epochs
            if not opt.notest or final_epoch:  # Calculate mAP
                results, maps, times = test.test(opt.data,
                                                 batch_size=total_batch_size,
                                                 imgsz=imgsz_test,
                                                 model=ema.ema,
                                                 single_cls=opt.single_cls,
                                                 dataloader=testloader,
                                                 save_dir=save_dir,
                                                 plots=False,
                                                 log_imgs=opt.log_imgs if wandb else 0)

            # Write
            with open(results_file, 'a') as f:
                f.write(s + '%10.4g' * 7 % results + '\n')  # P, R, mAP@.5, mAP@.5-.95, val_loss(box, obj, cls)
            if len(opt.name) and opt.bucket:
                os.system('gsutil cp %s gs://%s/results/results%s.txt' % (results_file, opt.bucket, opt.name))

            # Log
            tags = ['train/box_loss', 'train/obj_loss', 'train/cls_loss',  # train loss
                    'metrics/precision', 'metrics/recall', 'metrics/mAP_0.5', 'metrics/mAP_0.5:0.95',
                    'val/box_loss', 'val/obj_loss', 'val/cls_loss',  # val loss
                    'x/lr0', 'x/lr1', 'x/lr2']  # params
            for x, tag in zip(list(mloss[:-1]) + list(results) + lr, tags):
                if tb_writer:
                    tb_writer.add_scalar(tag, x, epoch)  # tensorboard
                if wandb:
                    wandb.log({tag: x})  # W&B

            # Update best mAP
            fi = fitness(np.array(results).reshape(1, -1))  # weighted combination of [P, R, mAP@.5, mAP@.5-.95]
            if fi > best_fitness:
                best_fitness = fi

            # Save model
            save = (not opt.nosave) or (final_epoch and not opt.evolve)
            if save:
                with open(results_file, 'r') as f:  # create checkpoint
                    # NOTE(review): `wandb_run` is only defined when wandb.run was
                    # None at startup; with an already-active run this line can
                    # raise NameError — confirm against the launcher.
                    ckpt = {'epoch': epoch,
                            'best_fitness': best_fitness,
                            'training_results': f.read(),
                            'model': ema.ema,
                            'optimizer': None if final_epoch else optimizer.state_dict(),
                            'wandb_id': wandb_run.id if wandb else None}

                # Save last, best and delete
                torch.save(ckpt, last)
                if best_fitness == fi:
                    torch.save(ckpt, best)
                del ckpt
        # end epoch ----------------------------------------------------------------------------------------------------
    # end training

    if rank in [-1, 0]:
        # Strip optimizers
        final = best if best.exists() else last  # final model
        for f in [last, best]:
            if f.exists():
                strip_optimizer(f)  # strip optimizers
        if opt.bucket:
            os.system(f'gsutil cp {final} gs://{opt.bucket}/weights')  # upload

        # Plots
        if plots:
            plot_results(save_dir=save_dir)  # save as results.png
            if wandb:
                files = ['results.png', 'precision_recall_curve.png', 'confusion_matrix.png']
                wandb.log({"Results": [wandb.Image(str(save_dir / f), caption=f) for f in files
                                       if (save_dir / f).exists()]})
                if opt.log_artifacts:
                    wandb.log_artifact(artifact_or_path=str(final), type='model', name=save_dir.stem)

        # Test best.pt
        logger.info('%g epochs completed in %.3f hours.\n' % (epoch - start_epoch + 1, (time.time() - t0) / 3600))
        if opt.data.endswith('coco.yaml') and nc == 80:  # if COCO
            for conf, iou, save_json in ([0.25, 0.45, False], [0.001, 0.65, True]):  # speed, mAP tests
                results, _, _ = test.test(opt.data,
                                          batch_size=total_batch_size,
                                          imgsz=imgsz_test,
                                          conf_thres=conf,
                                          iou_thres=iou,
                                          model=attempt_load(final, device).half(),
                                          single_cls=opt.single_cls,
                                          dataloader=testloader,
                                          save_dir=save_dir,
                                          save_json=save_json,
                                          plots=False)
    else:
        dist.destroy_process_group()

    wandb.run.finish() if wandb and wandb.run else None
    torch.cuda.empty_cache()
    return results
14,637 | import os
import cv2
import numpy as np
import shutil
import sys
from tqdm import tqdm
def xywh2xxyy(box):
    """Convert a (x, y, w, h) box into the (x1, x2, y1, y2) corner layout.

    Note the return order groups the two x coordinates first, then the two
    y coordinates — the layout ``convert`` downstream expects.
    """
    left, top, width, height = box[0], box[1], box[2], box[3]
    return left, left + width, top, top + height
def convert(size, box):
    """Normalize a (x1, x2, y1, y2) box to YOLO (cx, cy, w, h) in [0, 1].

    Args:
        size: (width, height) of the image.
        box: (x1, x2, y1, y2) pixel coordinates.

    Returns:
        (cx, cy, w, h) normalized by the image width/height.
    """
    inv_w = 1. / (size[0])
    inv_h = 1. / (size[1])
    # Box center; the -1 offset is inherited from the original VOC script.
    cx = (box[0] + box[1]) / 2.0 - 1
    cy = (box[2] + box[3]) / 2.0 - 1
    bw = box[1] - box[0]
    bh = box[3] - box[2]
    return cx * inv_w, cy * inv_h, bw * inv_w, bh * inv_h
def wider2face(root, phase='val', ignore_small=0):
    """Parse a WIDER FACE ``label.txt`` into YOLO-format label lines.

    Args:
        root: dataset root containing ``<root>/<phase>/label.txt`` and images.
        phase: dataset split subdirectory (e.g. 'val').
        ignore_small: skip boxes whose width or height is below this (pixels).

    Returns:
        dict mapping image path -> list of label strings
        ('0 <cx> <cy> <w> <h>' plus ten -1 landmark placeholders, normalized).
    """
    data = {}
    with open('{}/{}/label.txt'.format(root, phase), 'r') as f:
        lines = f.readlines()
        for line in tqdm(lines):
            line = line.strip()
            if '#' in line:
                # Header line: '# <relative image path>'. Read the image for its
                # size; width/height stay in scope for the box lines that follow.
                path = '{}/{}/images/{}'.format(root, phase, line.split()[-1])
                img = cv2.imread(path)  # assumes the image exists — cv2 returns None otherwise
                height, width, _ = img.shape
                data[path] = list()
            else:
                box = np.array(line.split()[0:4], dtype=np.float32)  # (x1,y1,w,h)
                if box[2] < ignore_small or box[3] < ignore_small:
                    continue
                # Normalize to YOLO (cx, cy, w, h) using the current image size.
                box = convert((width, height), xywh2xxyy(box))
                # Class 0 (face) plus ten -1s marking absent landmark annotations.
                label = '0 {} {} {} {} -1 -1 -1 -1 -1 -1 -1 -1 -1 -1'.format(round(box[0], 4), round(box[1], 4),
                                                                             round(box[2], 4), round(box[3], 4))
                data[path].append(label)
    return data
14,638 | import os.path
import sys
import torch
import torch.utils.data as data
import cv2
import numpy as np
The provided code snippet includes necessary dependencies for implementing the `detection_collate` function. Write a Python function `def detection_collate(batch)` to solve the following problem:
Custom collate fn for dealing with batches of images that have a different number of associated object annotations (bounding boxes). Arguments: batch: (tuple) A tuple of tensor images and lists of annotations Return: A tuple containing: 1) (tensor) batch of images stacked on their 0 dim 2) (list of tensors) annotations for a given image are stacked on 0 dim
Here is the function:
def detection_collate(batch):
    """Custom collate fn for batches whose images carry a variable number of
    object annotations (bounding boxes).

    Images are stacked along dim 0; annotations stay a list of per-image
    float tensors because their row counts differ between samples.

    Arguments:
        batch: (tuple) A tuple of tensor images and lists of annotations

    Return:
        A tuple containing:
            1) (tensor) batch of images stacked on their 0 dim
            2) (list of tensors) annotations for a given image stacked on 0 dim
    """
    images, annotations = [], []
    for sample in batch:
        for item in sample:
            if torch.is_tensor(item):
                images.append(item)
            elif isinstance(item, np.ndarray):
                annotations.append(torch.from_numpy(item).float())
    return torch.stack(images, 0), annotations
14,640 | import argparse
import time
from pathlib import Path
import sys
import os
import numpy as np
import cv2
import torch
import torch.backends.cudnn as cudnn
from numpy import random
import copy
from models.experimental import attempt_load
from utils.datasets import letterbox, img_formats, vid_formats, LoadImages, LoadStreams
from utils.general import check_img_size, non_max_suppression_face, apply_classifier, scale_coords, xyxy2xywh, \
strip_optimizer, set_logging, increment_path
from utils.plots import plot_one_box
from utils.torch_utils import select_device, load_classifier, time_synchronized
def attempt_load(weights, map_location=None):
    # Loads an ensemble of models weights=[a,b,c] or a single model weights=[a] or weights=a
    """Load one or more checkpoints and return a model or an Ensemble.

    Args:
        weights: one checkpoint path, or a list of paths; a list with more
            than one entry yields an Ensemble.
        map_location: forwarded to ``torch.load`` (e.g. 'cpu' or a device).

    Returns:
        A fused, eval-mode FP32 model for a single checkpoint, otherwise an
        Ensemble whose 'names' and 'stride' are copied from the last member.
    """
    model = Ensemble()
    for w in weights if isinstance(weights, list) else [weights]:
        attempt_download(w)  # fetch the checkpoint if it is not on disk
        model.append(torch.load(w, map_location=map_location)['model'].float().fuse().eval())  # load FP32 model
    # Compatibility updates
    for m in model.modules():
        if type(m) in [nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU]:
            m.inplace = True  # pytorch 1.7.0 compatibility
        elif type(m) is Conv:
            m._non_persistent_buffers_set = set()  # pytorch 1.6.0 compatibility
    if len(model) == 1:
        return model[-1]  # return model
    else:
        print('Ensemble created with %s\n' % weights)
        for k in ['names', 'stride']:
            # Expose the last member's attributes on the Ensemble itself.
            setattr(model, k, getattr(model[-1], k))
        return model  # return ensemble
def load_model(weights, device):
    """Load FP32 detection weights (single file or ensemble) onto *device*."""
    return attempt_load(weights, map_location=device)
14,641 | import argparse
import time
from pathlib import Path
import sys
import os
import numpy as np
import cv2
import torch
import torch.backends.cudnn as cudnn
from numpy import random
import copy
from models.experimental import attempt_load
from utils.datasets import letterbox, img_formats, vid_formats, LoadImages, LoadStreams
from utils.general import check_img_size, non_max_suppression_face, apply_classifier, scale_coords, xyxy2xywh, \
strip_optimizer, set_logging, increment_path
from utils.plots import plot_one_box
from utils.torch_utils import select_device, load_classifier, time_synchronized
def scale_coords_landmarks(img1_shape, coords, img0_shape, ratio_pad=None):
def show_results(img, xyxy, conf, landmarks, class_num):
img_formats = ['bmp', 'jpg', 'jpeg', 'png', 'tif', 'tiff', 'dng']
vid_formats = ['mov', 'avi', 'mp4', 'mpg', 'mpeg', 'm4v', 'wmv', 'mkv']
class LoadImages:
def __init__(self, path, img_size=640):
def __iter__(self):
def __next__(self):
def new_video(self, path):
def __len__(self): # number of files
class LoadStreams:
def __init__(self, sources='streams.txt', img_size=640):
def update(self, index, cap):
def __iter__(self):
def __next__(self):
def __len__(self): # 1E12 frames = 32 streams at 30 FPS for 30 years
def letterbox(img, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True):
def check_img_size(img_size, s=32):
def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None):
def non_max_suppression_face(prediction, conf_thres=0.25, iou_thres=0.45, classes=None, agnostic=False, labels=()):
def increment_path(path, exist_ok=True, sep=''): # update path
def detect(
    model,
    source,
    device,
    project,
    name,
    exist_ok,
    save_img,
    view_img
):
    """Run face detection over images/videos/streams and save/show results.

    Args:
        model: loaded detection model (must expose ``stride``).
        source: image/video path, txt of stream URLs, URL, or webcam index.
        device: torch device the input tensors are moved to.
        project, name: results are written under ``<project>/<name>``.
        exist_ok: reuse the run directory instead of incrementing it.
        save_img: write annotated images/videos to the run directory.
        view_img: display annotated frames in an OpenCV window.
    """
    # Load model
    img_size = 640
    conf_thres = 0.6
    iou_thres = 0.5
    imgsz=(640, 640)
    # Directories
    save_dir = increment_path(Path(project) / name, exist_ok=exist_ok)  # increment run
    Path(save_dir).mkdir(parents=True, exist_ok=True)  # make dir
    is_file = Path(source).suffix[1:] in (img_formats + vid_formats)
    is_url = source.lower().startswith(('rtsp://', 'rtmp://', 'http://', 'https://'))
    # Webcam index, stream list file, or a non-file URL -> streaming loader.
    webcam = source.isnumeric() or source.endswith('.txt') or (is_url and not is_file)
    # Dataloader
    if webcam:
        print('loading streams:', source)
        dataset = LoadStreams(source, img_size=imgsz)
        bs = 1  # batch_size
    else:
        print('loading images', source)
        dataset = LoadImages(source, img_size=imgsz)
        bs = 1  # batch_size
    vid_path, vid_writer = [None] * bs, [None] * bs
    for path, im, im0s, vid_cap in dataset:
        # The loader yields CHW (or NCHW) arrays; recover an HWC image first.
        if len(im.shape) == 4:
            orgimg = np.squeeze(im.transpose(0, 2, 3, 1), axis= 0)
        else:
            orgimg = im.transpose(1, 2, 0)
        orgimg = cv2.cvtColor(orgimg, cv2.COLOR_BGR2RGB)
        img0 = copy.deepcopy(orgimg)
        h0, w0 = orgimg.shape[:2]  # orig hw
        r = img_size / max(h0, w0)  # resize image to img_size
        if r != 1:  # always resize down, only resize up if training with augmentation
            interp = cv2.INTER_AREA if r < 1 else cv2.INTER_LINEAR
            img0 = cv2.resize(img0, (int(w0 * r), int(h0 * r)), interpolation=interp)
        imgsz = check_img_size(img_size, s=model.stride.max())  # check img_size
        img = letterbox(img0, new_shape=imgsz)[0]
        # Convert from w,h,c to c,w,h
        img = img.transpose(2, 0, 1).copy()
        img = torch.from_numpy(img).to(device)
        img = img.float()  # uint8 to fp16/32
        img /= 255.0  # 0 - 255 to 0.0 - 1.0
        if img.ndimension() == 3:
            img = img.unsqueeze(0)  # add batch dimension
        # Inference
        pred = model(img)[0]
        # Apply NMS
        pred = non_max_suppression_face(pred, conf_thres, iou_thres)
        print(len(pred[0]), 'face' if len(pred[0]) == 1 else 'faces')
        # Process detections
        for i, det in enumerate(pred):  # detections per image
            if webcam:  # batch_size >= 1
                p, im0, frame = path[i], im0s[i].copy(), dataset.count
            else:
                p, im0, frame = path, im0s.copy(), getattr(dataset, 'frame', 0)
            p = Path(p)  # to Path
            save_path = str(Path(save_dir) / p.name)  # im.jpg
            if len(det):
                # Rescale boxes from img_size to im0 size
                det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round()
                # Print results
                for c in det[:, -1].unique():
                    n = (det[:, -1] == c).sum()  # detections per class
                # Rescale the 5 landmark points (cols 5-14) the same way.
                det[:, 5:15] = scale_coords_landmarks(img.shape[2:], det[:, 5:15], im0.shape).round()
                for j in range(det.size()[0]):
                    # Pixel-space corners/landmarks, drawn by show_results.
                    xyxy = det[j, :4].view(-1).tolist()
                    conf = det[j, 4].cpu().numpy()
                    landmarks = det[j, 5:15].view(-1).tolist()
                    class_num = det[j, 15].cpu().numpy()
                    im0 = show_results(im0, xyxy, conf, landmarks, class_num)
            if view_img:
                cv2.imshow('result', im0)
                k = cv2.waitKey(1)
            # Save results (image with detections)
            if save_img:
                if dataset.mode == 'image':
                    cv2.imwrite(save_path, im0)
                else:  # 'video' or 'stream'
                    if vid_path[i] != save_path:  # new video
                        vid_path[i] = save_path
                        if isinstance(vid_writer[i], cv2.VideoWriter):
                            vid_writer[i].release()  # release previous video writer
                        if vid_cap:  # video
                            fps = vid_cap.get(cv2.CAP_PROP_FPS)
                            w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
                            h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
                        else:  # stream
                            fps, w, h = 30, im0.shape[1], im0.shape[0]
                        save_path = str(Path(save_path).with_suffix('.mp4'))  # force *.mp4 suffix on results videos
                        vid_writer[i] = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h))
                    try:
                        vid_writer[i].write(im0)
                    except Exception as e:
                        # Best-effort write: a failed frame is logged, not fatal.
                        print(e)
14,642 | import pycuda.autoinit
import pycuda.driver as cuda
import tensorrt as trt
import numpy as np
EXPLICIT_BATCH = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
def GiB(val):
    """Convert a gibibyte count to bytes (val * 2**30)."""
    return (val * 1) << 30
The provided code snippet includes necessary dependencies for implementing the `ONNX_to_TRT` function. Write a Python function `def ONNX_to_TRT(onnx_model_path=None,trt_engine_path=None,fp16_mode=False)` to solve the following problem:
Only applicable to TensorRT V8: generate a cudaEngine and save the engine file (fixed input shapes only). fp16_mode: if True, run FP16 inference. onnx_model_path: path of the ONNX weights to load. trt_engine_path: path where the TRT engine file is saved.
Here is the function:
def ONNX_to_TRT(onnx_model_path=None,trt_engine_path=None,fp16_mode=False):
    """
    Build a TensorRT engine from an ONNX model and serialize it to disk.
    Only valid for TensorRT V8; fixed input shapes only.

    Args:
        onnx_model_path: path of the ONNX weights to load.
        trt_engine_path: path where the serialized TRT engine file is saved.
        fp16_mode: if True, enable FP16 inference.

    Raises:
        RuntimeError: if the ONNX model fails to parse or the engine build fails.
    """
    builder = trt.Builder(TRT_LOGGER)
    network = builder.create_network(EXPLICIT_BATCH)
    parser = trt.OnnxParser(network, TRT_LOGGER)
    config = builder.create_builder_config()
    config.max_workspace_size = GiB(1)
    if fp16_mode:
        config.set_flag(trt.BuilderFlag.FP16)
    with open(onnx_model_path, 'rb') as model:
        # Report parser failures explicitly instead of via `assert`, which is
        # silently stripped when Python runs with -O and gives no error detail.
        if not parser.parse(model.read()):
            errors = '\n'.join(str(parser.get_error(i)) for i in range(parser.num_errors))
            raise RuntimeError('Failed to parse ONNX model {}:\n{}'.format(onnx_model_path, errors))
    serialized_engine = builder.build_serialized_network(network, config)
    if serialized_engine is None:
        # build_serialized_network returns None on failure rather than raising.
        raise RuntimeError('TensorRT engine build failed for {}'.format(onnx_model_path))
    with open(trt_engine_path, 'wb') as f:
        f.write(serialized_engine)  # serialized engine bytes
    print('TensorRT file in ' + trt_engine_path)
    print('============ONNX->TensorRT SUCCESS============')
14,643 | import os
import sys
import cv2
import copy
import torch
import argparse
from utils.datasets import letterbox
from detect_face import scale_coords_landmarks,show_results
from torch2trt.trt_model import TrtModel
def check_img_size(img_size, s=32):
    """Return img_size rounded up to a multiple of stride s, warning if changed."""
    verified = make_divisible(img_size, int(s))  # ceil to the nearest gs-multiple
    if verified != img_size:
        print('WARNING: --img-size %g must be multiple of max stride %g, updating to %g' % (img_size, s, verified))
    return verified
def letterbox(img, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True):
    """Resize and pad an image to new_shape while preserving aspect ratio.

    Args:
        img: HWC image array.
        new_shape: target size, an int (square) or (height, width).
        color: padding color.
        auto: pad only up to a 64-multiple rectangle instead of the full shape.
        scaleFill: stretch to new_shape exactly, with no padding.
        scaleup: allow upscaling; if False only scale down (better test mAP).

    Returns:
        (padded image, (w_ratio, h_ratio), (dw, dh) padding applied per side).
    """
    # Resize image to a 32-pixel-multiple rectangle https://github.com/ultralytics/yolov3/issues/232
    shape = img.shape[:2]  # current shape [height, width]
    if isinstance(new_shape, int):
        new_shape = (new_shape, new_shape)
    # Scale ratio (new / old)
    r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
    if not scaleup:  # only scale down, do not scale up (for better test mAP)
        r = min(r, 1.0)
    # Compute padding
    ratio = r, r  # width, height ratios
    new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
    dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1]  # wh padding
    if auto:  # minimum rectangle
        dw, dh = np.mod(dw, 64), np.mod(dh, 64)  # wh padding
    elif scaleFill:  # stretch
        dw, dh = 0.0, 0.0
        new_unpad = (new_shape[1], new_shape[0])
        ratio = new_shape[1] / shape[1], new_shape[0] / shape[0]  # width, height ratios
    dw /= 2  # divide padding into 2 sides
    dh /= 2
    if shape[::-1] != new_unpad:  # resize
        img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)
    # The +/-0.1 nudges make odd paddings split deterministically between sides.
    top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
    left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
    img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color)  # add border
    return img, ratio, (dw, dh)
The provided code snippet includes necessary dependencies for implementing the `img_process` function. Write a Python function `def img_process(img_path,long_side=640,stride_max=32)` to solve the following problem:
Image preprocessing.
Here is the function:
def img_process(img_path,long_side=640,stride_max=32):
    '''
    Image preprocessing: read the image, scale it so the long side equals
    `long_side`, letterbox-pad to a fixed size, and convert it to a
    normalized NCHW float tensor.

    Args:
        img_path: path of the image to load (read as BGR by cv2).
        long_side: target length of the longer image side.
        stride_max: model stride the final size must be a multiple of.

    Returns:
        (img, orgimg): 4-D float tensor in [0, 1] (RGB, CHW) and the
        original BGR image as read from disk.
    '''
    orgimg=cv2.imread(img_path)  # assumes the path exists — cv2 returns None otherwise
    img0 = copy.deepcopy(orgimg)
    h0, w0 = orgimg.shape[:2]  # orig hw
    r = long_side/ max(h0, w0)  # resize image to img_size
    if r != 1:  # always resize down, only resize up if training with augmentation
        interp = cv2.INTER_AREA if r < 1 else cv2.INTER_LINEAR
        img0 = cv2.resize(img0, (int(w0 * r), int(h0 * r)), interpolation=interp)
    imgsz = check_img_size(long_side, s=stride_max)  # check img_size
    img = letterbox(img0, new_shape=imgsz,auto=False)[0]  # auto=True: minimal rectangle; False: fixed size
    # Convert
    img = img[:, :, ::-1].transpose(2, 0, 1).copy()  # BGR to RGB, to 3x416x416
    img = torch.from_numpy(img)
    img = img.float()  # uint8 to fp16/32
    img /= 255.0  # 0 - 255 to 0.0 - 1.0
    if img.ndimension() == 3:
        img = img.unsqueeze(0)  # add batch dimension
    return img,orgimg
14,644 | import os
import sys
import cv2
import copy
import torch
import argparse
from utils.datasets import letterbox
from detect_face import scale_coords_landmarks,show_results
from torch2trt.trt_model import TrtModel
cur_path=os.path.abspath(os.path.dirname(__file__))
def xyxy2xywh(x):
    """Convert nx4 boxes from corner form [x1, y1, x2, y2] to center form
    [x, y, w, h], where xy1 is top-left and xy2 is bottom-right."""
    out = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
    out[:, 0] = (x[:, 0] + x[:, 2]) / 2  # center x
    out[:, 1] = (x[:, 1] + x[:, 3]) / 2  # center y
    out[:, 2] = x[:, 2] - x[:, 0]        # width
    out[:, 3] = x[:, 3] - x[:, 1]        # height
    return out
def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None):
    """Map (xyxy) boxes from the padded/resized img1_shape back to img0_shape,
    modifying *coords* in place and clamping into the original image."""
    if ratio_pad is not None:
        gain = ratio_pad[0][0]
        pad_x, pad_y = ratio_pad[1][0], ratio_pad[1][1]
    else:
        # Recompute the letterbox gain and symmetric padding from the shapes.
        gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1])  # gain = old / new
        pad_x = (img1_shape[1] - img0_shape[1] * gain) / 2  # x (width) padding
        pad_y = (img1_shape[0] - img0_shape[0] * gain) / 2  # y (height) padding
    coords[:, [0, 2]] -= pad_x  # undo x padding
    coords[:, [1, 3]] -= pad_y  # undo y padding
    coords[:, :4] /= gain
    clip_coords(coords, img0_shape)
    return coords
def scale_coords_landmarks(img1_shape, coords, img0_shape, ratio_pad=None):
    """Rescale 5-point landmarks (x1..y5 in columns 0-9) from img1_shape back
    to img0_shape, modifying *coords* in place and clamping to image bounds."""
    if ratio_pad is not None:
        gain = ratio_pad[0][0]
        pad = ratio_pad[1]
    else:
        # Derive the letterbox gain and symmetric padding from the two shapes.
        gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1])  # gain = old / new
        pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2  # wh padding
    x_cols = [0, 2, 4, 6, 8]
    y_cols = [1, 3, 5, 7, 9]
    coords[:, x_cols] -= pad[0]  # undo x padding
    coords[:, y_cols] -= pad[1]  # undo y padding
    coords[:, :10] /= gain
    # Clamp every point into the original image bounds (in place).
    for col in x_cols:
        coords[:, col].clamp_(0, img0_shape[1])
    for col in y_cols:
        coords[:, col].clamp_(0, img0_shape[0])
    return coords
def show_results(img, xyxy, conf, landmarks, class_num):
    """Draw one detection (box, five landmark dots, confidence text) on a copy of img.

    Args:
        img: BGR image of shape (h, w, c).
        xyxy: pixel corner coordinates [x1, y1, x2, y2] (cast straight to int).
        conf: confidence value; rendered as text truncated to 5 characters.
        landmarks: ten pixel values [x1, y1, ..., x5, y5].
        class_num: class index (accepted but not used when drawing).

    Returns:
        A new image with the annotations drawn; the input is not modified.
    """
    h,w,c = img.shape
    tl = 1 or round(0.002 * (h + w) / 2) + 1  # line/font thickness
    # NOTE(review): `1 or ...` always evaluates to 1, so the size-based
    # thickness on the right is dead code — confirm whether
    # `round(0.002 * (h + w) / 2) + 1 or 1` was intended.
    x1 = int(xyxy[0])
    y1 = int(xyxy[1])
    x2 = int(xyxy[2])
    y2 = int(xyxy[3])
    img = img.copy()  # draw on a copy, leave the caller's image untouched
    cv2.rectangle(img, (x1,y1), (x2, y2), (0,255,0), thickness=tl, lineType=cv2.LINE_AA)
    clors = [(255,0,0),(0,255,0),(0,0,255),(255,255,0),(0,255,255)]
    for i in range(5):
        point_x = int(landmarks[2 * i])
        point_y = int(landmarks[2 * i + 1])
        cv2.circle(img, (point_x, point_y), tl+1, clors[i], -1)  # filled dot per landmark
    tf = max(tl - 1, 1)  # font thickness
    label = str(conf)[:5]
    cv2.putText(img, label, (x1, y1 - 2), 0, tl / 3, [225, 255, 255], thickness=tf, lineType=cv2.LINE_AA)
    return img
The provided code snippet includes necessary dependencies for implementing the `img_vis` function. Write a Python function `def img_vis(img,orgimg,pred,vis_thres = 0.6)` to solve the following problem:
Prediction visualization. vis_thres: visualization confidence threshold.
Here is the function:
def img_vis(img,orgimg,pred,vis_thres = 0.6):
    '''
    Visualize predictions on the original image and save the result.

    Args:
        img: letterboxed network input (only its h/w are used for rescaling).
        orgimg: original BGR image the boxes/landmarks are drawn onto.
        pred: list of per-image detection tensors with columns
              [x1, y1, x2, y2, conf, 10 landmark values, class].
        vis_thres: confidence threshold below which detections are skipped.
    '''
    print('img.shape: ', img.shape)
    print('orgimg.shape: ', orgimg.shape)
    no_vis_nums = 0
    # Process detections
    for i, det in enumerate(pred):  # detections per image
        if len(det):
            # Rescale boxes from img_size to im0 size
            det[:, :4] = scale_coords(img.shape[2:], det[:, :4], orgimg.shape).round()
            # Print results
            for c in det[:, -1].unique():
                n = (det[:, -1] == c).sum()  # detections per class
            det[:, 5:15] = scale_coords_landmarks(img.shape[2:], det[:, 5:15], orgimg.shape).round()
            for j in range(det.size()[0]):
                if det[j, 4].cpu().numpy() < vis_thres:
                    no_vis_nums += 1
                    continue
                # Bug fix: show_results casts its box/landmark arguments
                # straight to int pixel coordinates, so pass the rescaled
                # xyxy corners and landmarks directly (matching detect())
                # instead of the previously-passed normalized xywh values,
                # which collapsed every drawn box to the image origin.
                xyxy = det[j, :4].view(-1).tolist()
                conf = det[j, 4].cpu().numpy()
                landmarks = det[j, 5:15].view(-1).tolist()
                class_num = det[j, 15].cpu().numpy()
                orgimg = show_results(orgimg, xyxy, conf, landmarks, class_num)
    cv2.imwrite(cur_path+'/result.jpg', orgimg)
    print('result save in '+cur_path+'/result.jpg')
14,645 | from models.experimental import attempt_load
from torch2trt.trt_model import TrtModel
import argparse
import torch
import time
from tqdm import tqdm
def run(model,img,warmup_iter,iter):
    """Benchmark average forward latency of *model* on *img* in milliseconds.

    Runs `warmup_iter` untimed warm-up passes, then times `iter` passes,
    synchronizing CUDA before and after so GPU work is fully counted.
    Requires a CUDA device. (`iter` shadows the builtin; name kept for
    interface compatibility.)
    """
    print('start warm up...')
    for _ in tqdm(range(warmup_iter)):
        model(img)

    print('start calculate...')
    torch.cuda.synchronize()
    t0 = time.time()
    for _ in tqdm(range(iter)):
        model(img)
    torch.cuda.synchronize()
    elapsed_ms = (time.time() - t0) * 1000
    return elapsed_ms / float(iter)
14,646 | import os
import tqdm
import pickle
import argparse
import numpy as np
from scipy.io import loadmat
from bbox import bbox_overlaps
from IPython import embed
def get_gt_boxes_from_txt(gt_path, cache_dir):
    """Parse WIDER-style ground-truth boxes from a txt file, with pickle caching.

    File format: an image-name line (contains '--'), then one box-count line,
    then one 'x y w h ...' line per box until the next image-name line.

    Args:
        gt_path: path to the ground-truth txt file.
        cache_dir: directory where 'gt_cache.pkl' is written/read.

    Returns:
        dict mapping image name -> float32 ndarray of boxes, shape (N, 4).
    """
    cache_file = os.path.join(cache_dir, 'gt_cache.pkl')
    if os.path.exists(cache_file):
        with open(cache_file, 'rb') as f:
            return pickle.load(f)

    with open(gt_path, 'r') as f:
        lines = f.readlines()
    lines = list(map(lambda x: x.rstrip('\r\n'), lines))

    boxes = {}
    print(len(lines))
    current_boxes = []
    current_name = None
    state = 0  # 0: expect first name, 1: skip count line, 2: reading boxes
    for line in lines:
        if state == 0 and '--' in line:
            state = 1
            current_name = line
            continue
        if state == 1:
            state = 2  # the per-image box-count line is not needed
            continue

        if state == 2 and '--' in line:
            # Next image: flush the previous image's boxes first.
            state = 1
            boxes[current_name] = np.array(current_boxes).astype('float32')
            current_name = line
            current_boxes = []
            continue

        if state == 2:
            box = [float(x) for x in line.split(' ')[:4]]
            current_boxes.append(box)
            continue

    # Bug fix: the original only flushed boxes when the *next* name line
    # appeared, so the final image in the file was silently dropped.
    if current_name is not None:
        boxes[current_name] = np.array(current_boxes).astype('float32')

    with open(cache_file, 'wb') as f:
        pickle.dump(boxes, f)
    return boxes
14,647 | import os
import tqdm
import pickle
import argparse
import numpy as np
from scipy.io import loadmat
from bbox import bbox_overlaps
from IPython import embed
def get_gt_boxes(gt_dir):
    """ gt dir: (wider_face_val.mat, wider_easy_val.mat, wider_medium_val.mat, wider_hard_val.mat)"""
    # Load the main annotation mat plus the three difficulty-split mats.
    mats = {split: loadmat(os.path.join(gt_dir, 'wider_{}_val.mat'.format(split)))
            for split in ('face', 'hard', 'medium', 'easy')}
    gt = mats['face']
    return (gt['face_bbx_list'], gt['event_list'], gt['file_list'],
            mats['hard']['gt_list'], mats['medium']['gt_list'], mats['easy']['gt_list'])
def get_preds(pred_dir):
    """Read per-event prediction txt files into {event: {image: boxes}}."""
    boxes = dict()
    pbar = tqdm.tqdm(os.listdir(pred_dir))
    for event in pbar:
        pbar.set_description('Reading Predictions ')
        event_dir = os.path.join(pred_dir, event)
        current_event = dict()
        for imgtxt in os.listdir(event_dir):
            imgname, _boxes = read_pred_file(os.path.join(event_dir, imgtxt))
            # Index by the bare image name, without the .jpg suffix.
            current_event[imgname.rstrip('.jpg')] = _boxes
        boxes[event] = current_event
    return boxes
def norm_score(pred):
    """Min-max normalize all prediction scores in place.

    pred: {event: {image: ndarray [[x1, y1, x2, y2, s], ...]}}. The score
    column of every non-empty array is rescaled to [0, 1] using the global
    min/max taken over the whole dict.
    """
    max_score = 0
    min_score = 1
    # First pass: find the global score range.
    for event_preds in pred.values():
        for v in event_preds.values():
            if len(v) == 0:
                continue
            min_score = min(min_score, np.min(v[:, -1]))
            max_score = max(max_score, np.max(v[:, -1]))

    diff = max_score - min_score
    # Second pass: rescale every score column in place.
    for event_preds in pred.values():
        for v in event_preds.values():
            if len(v) == 0:
                continue
            v[:, -1] = (v[:, -1] - min_score) / diff
def image_eval(pred, gt, ignore, iou_thresh):
    """Greedy single-image matching of predictions against ground truth.

    Args:
        pred: Nx5 array [x, y, w, h, score] (converted to corners below).
        gt: Mx4 array [x, y, w, h].
        ignore: length-M mask; 1 = box counts toward recall, 0 = ignored.
        iou_thresh: IoU required for a prediction/GT match.

    Returns:
        (pred_recall, proposal_list): cumulative count of matched GT boxes
        after each prediction, and a per-prediction flag where -1 marks
        predictions matched to an ignored GT (excluded from proposals).
    """
    _pred = pred.copy()
    _gt = gt.copy()
    pred_recall = np.zeros(_pred.shape[0])
    recall_list = np.zeros(_gt.shape[0])
    proposal_list = np.ones(_pred.shape[0])
    # Convert both sets from (x, y, w, h) to corner form in place.
    _pred[:, 2] = _pred[:, 2] + _pred[:, 0]
    _pred[:, 3] = _pred[:, 3] + _pred[:, 1]
    _gt[:, 2] = _gt[:, 2] + _gt[:, 0]
    _gt[:, 3] = _gt[:, 3] + _gt[:, 1]
    overlaps = bbox_overlaps(_pred[:, :4], _gt)
    for h in range(_pred.shape[0]):
        gt_overlap = overlaps[h]
        max_overlap, max_idx = gt_overlap.max(), gt_overlap.argmax()
        if max_overlap >= iou_thresh:
            if ignore[max_idx] == 0:
                # Best match is an ignored GT: drop both from the tally.
                recall_list[max_idx] = -1
                proposal_list[h] = -1
            elif recall_list[max_idx] == 0:
                recall_list[max_idx] = 1  # first time this GT box is matched
        r_keep_index = np.where(recall_list == 1)[0]
        pred_recall[h] = len(r_keep_index)  # cumulative recall after pred h
    return pred_recall, proposal_list
def img_pr_info(thresh_num, pred_info, proposal_list, pred_recall):
    """Accumulate per-image (proposal count, recall) pairs over score thresholds.

    Assumes pred_info rows are sorted by descending score (column 4). For each
    of `thresh_num` thresholds, records how many kept proposals score at or
    above the threshold and the recall reached at the last such prediction.
    """
    pr_info = np.zeros((thresh_num, 2)).astype('float')
    for t in range(thresh_num):
        thresh = 1 - (t + 1) / thresh_num
        passing = np.where(pred_info[:, 4] >= thresh)[0]
        if len(passing) == 0:
            continue  # row stays at the (0, 0) initialization
        last = passing[-1]
        kept = np.where(proposal_list[:last + 1] == 1)[0]
        pr_info[t, 0] = len(kept)
        pr_info[t, 1] = pred_recall[last]
    return pr_info
def dataset_pr_info(thresh_num, pr_curve, count_face):
    """Turn accumulated (proposals, true positives) pairs into (precision, recall)."""
    out = np.zeros((thresh_num, 2))
    for t in range(thresh_num):
        out[t, 0] = pr_curve[t, 1] / pr_curve[t, 0]  # precision = TP / proposals
        out[t, 1] = pr_curve[t, 1] / count_face      # recall = TP / total faces
    return out
def voc_ap(rec, prec):
    """Compute VOC-style AP: area under the monotone precision envelope."""
    # Sentinel endpoints so the envelope/area computation covers [0, 1].
    mrec = np.concatenate(([0.], rec, [1.]))
    mpre = np.concatenate(([0.], prec, [0.]))

    # Make precision monotonically non-increasing (right-to-left running max).
    mpre = np.maximum.accumulate(mpre[::-1])[::-1]

    # Sum precision * delta-recall at the points where recall changes value.
    changed = np.where(mrec[1:] != mrec[:-1])[0]
    return np.sum((mrec[changed + 1] - mrec[changed]) * mpre[changed + 1])
def evaluation(pred, gt_path, iou_thresh=0.5):
    """Run WIDER FACE evaluation and print easy/medium/hard average precision.

    Args:
        pred: directory of per-event prediction txt files (see get_preds).
        gt_path: directory holding the WIDER val ground-truth .mat files.
        iou_thresh: IoU threshold for a prediction/GT match.
    """
    pred = get_preds(pred)
    norm_score(pred)  # rescale all scores to [0, 1] so thresholds are comparable
    facebox_list, event_list, file_list, hard_gt_list, medium_gt_list, easy_gt_list = get_gt_boxes(gt_path)
    event_num = len(event_list)
    thresh_num = 1000  # number of score thresholds sampled for the PR curve
    settings = ['easy', 'medium', 'hard']
    setting_gts = [easy_gt_list, medium_gt_list, hard_gt_list]
    aps = []
    for setting_id in range(3):
        # different setting
        gt_list = setting_gts[setting_id]
        count_face = 0
        pr_curve = np.zeros((thresh_num, 2)).astype('float')
        # [hard, medium, easy]
        pbar = tqdm.tqdm(range(event_num))
        for i in pbar:
            pbar.set_description('Processing {}'.format(settings[setting_id]))
            event_name = str(event_list[i][0][0])
            img_list = file_list[i][0]
            pred_list = pred[event_name]
            sub_gt_list = gt_list[i][0]
            # img_pr_info_list = np.zeros((len(img_list), thresh_num, 2))
            gt_bbx_list = facebox_list[i][0]
            for j in range(len(img_list)):
                pred_info = pred_list[str(img_list[j][0][0])]
                gt_boxes = gt_bbx_list[j][0].astype('float')
                keep_index = sub_gt_list[j][0]
                count_face += len(keep_index)
                if len(gt_boxes) == 0 or len(pred_info) == 0:
                    continue
                ignore = np.zeros(gt_boxes.shape[0])
                if len(keep_index) != 0:
                    # keep_index comes from the MATLAB .mat files — presumably
                    # 1-based indices, hence the -1; confirm against the data.
                    ignore[keep_index-1] = 1
                pred_recall, proposal_list = image_eval(pred_info, gt_boxes, ignore, iou_thresh)
                _img_pr_info = img_pr_info(thresh_num, pred_info, proposal_list, pred_recall)
                pr_curve += _img_pr_info  # accumulate PR counts across images
        pr_curve = dataset_pr_info(thresh_num, pr_curve, count_face)
        propose = pr_curve[:, 0]  # precision per threshold
        recall = pr_curve[:, 1]
        ap = voc_ap(recall, propose)
        aps.append(ap)
    print("==================== Results ====================")
    print("Easy   Val AP: {}".format(aps[0]))
    print("Medium Val AP: {}".format(aps[1]))
    print("Hard   Val AP: {}".format(aps[2]))
    print("=================================================")
14,648 | from pathlib import Path
import torch
from models.yolo import Model
from utils.general import set_logging
from utils.google_utils import attempt_download
def create(name, pretrained, channels, classes, autoshape):
    """Build a YOLOv5 model by config name, optionally loading pretrained weights.

    Arguments:
        name (str): model name, i.e. 'yolov5s' (selects models/<name>.yaml)
        pretrained (bool): load pretrained weights into the model
        channels (int): number of input channels
        classes (int): number of model classes

    Returns:
        pytorch model
    """
    config = Path(__file__).parent / 'models' / f'{name}.yaml'  # model.yaml path
    try:
        model = Model(config, channels, classes)
        if pretrained:
            fname = f'{name}.pt'  # checkpoint filename
            attempt_download(fname)  # download if not found locally
            checkpoint = torch.load(fname, map_location=torch.device('cpu'))
            state_dict = checkpoint['model'].float().state_dict()  # to FP32
            # Keep only tensors whose shape matches the freshly built model.
            reference = model.state_dict()
            state_dict = {k: v for k, v in state_dict.items() if reference[k].shape == v.shape}
            model.load_state_dict(state_dict, strict=False)
            if len(checkpoint['model'].names) == classes:
                model.names = checkpoint['model'].names  # set class names attribute
        if autoshape:
            model = model.autoshape()  # for file/URI/PIL/cv2/np inputs and NMS
        return model
    except Exception as e:
        help_url = 'https://github.com/ultralytics/yolov5/issues/36'
        s = 'Cache maybe be out of date, try force_reload=True. See %s for help.' % help_url
        raise Exception(s) from e
The provided code snippet includes necessary dependencies for implementing the `yolov5s` function. Write a Python function `def yolov5s(pretrained=False, channels=3, classes=80, autoshape=True)` to solve the following problem:
YOLOv5-small model from https://github.com/ultralytics/yolov5 Arguments: pretrained (bool): load pretrained weights into the model, default=False channels (int): number of input channels, default=3 classes (int): number of model classes, default=80 Returns: pytorch model
Here is the function:
def yolov5s(pretrained=False, channels=3, classes=80, autoshape=True):
    """YOLOv5-small model (https://github.com/ultralytics/yolov5).

    Arguments:
        pretrained (bool): load pretrained weights into the model, default=False
        channels (int): number of input channels, default=3
        classes (int): number of model classes, default=80

    Returns:
        pytorch model
    """
    return create('yolov5s', pretrained, channels, classes, autoshape)
14,649 | from pathlib import Path
import torch
from models.yolo import Model
from utils.general import set_logging
from utils.google_utils import attempt_download
def create(name, pretrained, channels, classes, autoshape):
    """Creates a specified YOLOv5 model
    Arguments:
        name (str): name of model, i.e. 'yolov5s'
        pretrained (bool): load pretrained weights into the model
        channels (int): number of input channels
        classes (int): number of model classes
    Returns:
        pytorch model
    """
    config = Path(__file__).parent / 'models' / f'{name}.yaml'  # model.yaml path
    try:
        model = Model(config, channels, classes)
        if pretrained:
            fname = f'{name}.pt'  # checkpoint filename
            attempt_download(fname)  # download if not found locally
            ckpt = torch.load(fname, map_location=torch.device('cpu'))  # load
            state_dict = ckpt['model'].float().state_dict()  # to FP32
            # Keep only tensors whose shape matches the freshly built model
            # (head shapes differ when `classes` differs from the checkpoint).
            state_dict = {k: v for k, v in state_dict.items() if model.state_dict()[k].shape == v.shape}  # filter
            model.load_state_dict(state_dict, strict=False)  # load
            if len(ckpt['model'].names) == classes:
                model.names = ckpt['model'].names  # set class names attribute
        if autoshape:
            model = model.autoshape()  # for file/URI/PIL/cv2/np inputs and NMS
        return model
    except Exception as e:
        help_url = 'https://github.com/ultralytics/yolov5/issues/36'
        s = 'Cache maybe be out of date, try force_reload=True. See %s for help.' % help_url
        raise Exception(s) from e
The provided code snippet includes necessary dependencies for implementing the `yolov5m` function. Write a Python function `def yolov5m(pretrained=False, channels=3, classes=80, autoshape=True)` to solve the following problem:
YOLOv5-medium model from https://github.com/ultralytics/yolov5 Arguments: pretrained (bool): load pretrained weights into the model, default=False channels (int): number of input channels, default=3 classes (int): number of model classes, default=80 Returns: pytorch model
Here is the function:
def yolov5m(pretrained=False, channels=3, classes=80, autoshape=True):
    """YOLOv5-medium model (https://github.com/ultralytics/yolov5).

    Arguments:
        pretrained (bool): load pretrained weights into the model, default=False
        channels (int): number of input channels, default=3
        classes (int): number of model classes, default=80

    Returns:
        pytorch model
    """
    return create('yolov5m', pretrained, channels, classes, autoshape)
14,650 | from pathlib import Path
import torch
from models.yolo import Model
from utils.general import set_logging
from utils.google_utils import attempt_download
def create(name, pretrained, channels, classes, autoshape):
    """Creates a specified YOLOv5 model
    Arguments:
        name (str): name of model, i.e. 'yolov5s'
        pretrained (bool): load pretrained weights into the model
        channels (int): number of input channels
        classes (int): number of model classes
    Returns:
        pytorch model
    """
    config = Path(__file__).parent / 'models' / f'{name}.yaml'  # model.yaml path
    try:
        model = Model(config, channels, classes)
        if pretrained:
            fname = f'{name}.pt'  # checkpoint filename
            attempt_download(fname)  # download if not found locally
            ckpt = torch.load(fname, map_location=torch.device('cpu'))  # load
            state_dict = ckpt['model'].float().state_dict()  # to FP32
            # Keep only tensors whose shape matches the freshly built model
            # (head shapes differ when `classes` differs from the checkpoint).
            state_dict = {k: v for k, v in state_dict.items() if model.state_dict()[k].shape == v.shape}  # filter
            model.load_state_dict(state_dict, strict=False)  # load
            if len(ckpt['model'].names) == classes:
                model.names = ckpt['model'].names  # set class names attribute
        if autoshape:
            model = model.autoshape()  # for file/URI/PIL/cv2/np inputs and NMS
        return model
    except Exception as e:
        help_url = 'https://github.com/ultralytics/yolov5/issues/36'
        s = 'Cache maybe be out of date, try force_reload=True. See %s for help.' % help_url
        raise Exception(s) from e
The provided code snippet includes necessary dependencies for implementing the `yolov5l` function. Write a Python function `def yolov5l(pretrained=False, channels=3, classes=80, autoshape=True)` to solve the following problem:
YOLOv5-large model from https://github.com/ultralytics/yolov5 Arguments: pretrained (bool): load pretrained weights into the model, default=False channels (int): number of input channels, default=3 classes (int): number of model classes, default=80 Returns: pytorch model
Here is the function:
def yolov5l(pretrained=False, channels=3, classes=80, autoshape=True):
    """YOLOv5-large model from https://github.com/ultralytics/yolov5

    Arguments:
        pretrained (bool): load pretrained weights into the model, default=False
        channels (int): number of input channels, default=3
        classes (int): number of model classes, default=80

    Returns:
        pytorch model
    """
    # Thin wrapper: delegate to the generic factory with the fixed config name.
    model_name = 'yolov5l'
    return create(model_name, pretrained, channels, classes, autoshape)
14,651 | from pathlib import Path
import torch
from models.yolo import Model
from utils.general import set_logging
from utils.google_utils import attempt_download
def create(name, pretrained, channels, classes, autoshape):
    """Creates a specified YOLOv5 model

    Arguments:
        name (str): name of model, i.e. 'yolov5s'
        pretrained (bool): load pretrained weights into the model
        channels (int): number of input channels
        classes (int): number of model classes
        autoshape (bool): wrap the model with autoShape for file/URI/PIL/cv2/np inputs and NMS

    Returns:
        pytorch model

    Raises:
        Exception: on any construction/loading failure, with the original
            error chained as the cause.
    """
    config = Path(__file__).parent / 'models' / f'{name}.yaml'  # model.yaml path
    try:
        model = Model(config, channels, classes)
        if pretrained:
            fname = f'{name}.pt'  # checkpoint filename
            attempt_download(fname)  # download if not found locally
            ckpt = torch.load(fname, map_location=torch.device('cpu'))  # load
            state_dict = ckpt['model'].float().state_dict()  # to FP32
            # Keep only tensors whose shapes match the freshly built model
            # (the head differs when `classes` != checkpoint class count).
            state_dict = {k: v for k, v in state_dict.items() if model.state_dict()[k].shape == v.shape}  # filter
            model.load_state_dict(state_dict, strict=False)  # load
            if len(ckpt['model'].names) == classes:
                model.names = ckpt['model'].names  # set class names attribute
        if autoshape:
            model = model.autoshape()  # for file/URI/PIL/cv2/np inputs and NMS
        return model
    except Exception as e:
        help_url = 'https://github.com/ultralytics/yolov5/issues/36'
        # FIX: corrected message typo "maybe be" -> "may be"
        s = 'Cache may be out of date, try force_reload=True. See %s for help.' % help_url
        raise Exception(s) from e
The provided code snippet includes necessary dependencies for implementing the `yolov5x` function. Write a Python function `def yolov5x(pretrained=False, channels=3, classes=80, autoshape=True)` to solve the following problem:
YOLOv5-xlarge model from https://github.com/ultralytics/yolov5 Arguments: pretrained (bool): load pretrained weights into the model, default=False channels (int): number of input channels, default=3 classes (int): number of model classes, default=80 Returns: pytorch model
Here is the function:
def yolov5x(pretrained=False, channels=3, classes=80, autoshape=True):
    """YOLOv5-xlarge model from https://github.com/ultralytics/yolov5

    Arguments:
        pretrained (bool): load pretrained weights into the model, default=False
        channels (int): number of input channels, default=3
        classes (int): number of model classes, default=80

    Returns:
        pytorch model
    """
    # Thin wrapper: delegate to the generic factory with the fixed config name.
    model_name = 'yolov5x'
    return create(model_name, pretrained, channels, classes, autoshape)
14,652 | from pathlib import Path
import torch
from models.yolo import Model
from utils.general import set_logging
from utils.google_utils import attempt_download
class Model(nn.Module):
    """YOLOv5 detection model, built dynamically from a yaml config by `parse_model`."""

    def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None):  # model, input channels, number of classes
        """Build the model from `cfg` (dict or yaml path); `nc` overrides the yaml class count."""
        super(Model, self).__init__()
        if isinstance(cfg, dict):
            self.yaml = cfg  # model dict
        else:  # is *.yaml
            import yaml  # for torch hub
            self.yaml_file = Path(cfg).name
            with open(cfg) as f:
                self.yaml = yaml.load(f, Loader=yaml.FullLoader)  # model dict
        # Define model
        ch = self.yaml['ch'] = self.yaml.get('ch', ch)  # input channels
        if nc and nc != self.yaml['nc']:
            logger.info('Overriding model.yaml nc=%g with nc=%g' % (self.yaml['nc'], nc))
            self.yaml['nc'] = nc  # override yaml value
        self.model, self.save = parse_model(deepcopy(self.yaml), ch=[ch])  # model, savelist
        self.names = [str(i) for i in range(self.yaml['nc'])]  # default names
        # print([x.shape for x in self.forward(torch.zeros(1, ch, 64, 64))])
        # Build strides, anchors
        m = self.model[-1]  # Detect()
        if isinstance(m, Detect):
            s = 128  # 2x min stride
            # Dummy forward pass measures each detection layer's downsample factor.
            m.stride = torch.tensor([s / x.shape[-2] for x in self.forward(torch.zeros(1, ch, s, s))])  # forward
            m.anchors /= m.stride.view(-1, 1, 1)  # express anchors in grid units
            check_anchor_order(m)
            self.stride = m.stride
            self._initialize_biases()  # only run once
            # print('Strides: %s' % m.stride.tolist())
        # Init weights, biases
        initialize_weights(self)
        self.info()
        logger.info('')
    def forward(self, x, augment=False, profile=False):
        """Forward pass; with `augment`, run multi-scale/flip TTA and merge results."""
        if augment:
            img_size = x.shape[-2:]  # height, width
            s = [1, 0.83, 0.67]  # scales
            f = [None, 3, None]  # flips (2-ud, 3-lr)
            y = []  # outputs
            for si, fi in zip(s, f):
                xi = scale_img(x.flip(fi) if fi else x, si)
                yi = self.forward_once(xi)[0]  # forward
                # cv2.imwrite('img%g.jpg' % s, 255 * xi[0].numpy().transpose((1, 2, 0))[:, :, ::-1])  # save
                yi[..., :4] /= si  # de-scale boxes back to original resolution
                if fi == 2:
                    yi[..., 1] = img_size[0] - yi[..., 1]  # de-flip ud
                elif fi == 3:
                    yi[..., 0] = img_size[1] - yi[..., 0]  # de-flip lr
                y.append(yi)
            return torch.cat(y, 1), None  # augmented inference, train
        else:
            return self.forward_once(x, profile)  # single-scale inference, train
    def forward_once(self, x, profile=False):
        """Run the layer graph once, routing saved intermediate outputs between layers."""
        y, dt = [], []  # outputs
        for m in self.model:
            if m.f != -1:  # if not from previous layer
                x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f]  # from earlier layers
            if profile:
                o = thop.profile(m, inputs=(x,), verbose=False)[0] / 1E9 * 2 if thop else 0  # FLOPS
                t = time_synchronized()
                for _ in range(10):
                    _ = m(x)
                dt.append((time_synchronized() - t) * 100)
                print('%10.1f%10.0f%10.1fms %-40s' % (o, m.np, dt[-1], m.type))
            x = m(x)  # run
            y.append(x if m.i in self.save else None)  # save output only if a later layer needs it
        if profile:
            print('%.1fms total' % sum(dt))
        return x
    def _initialize_biases(self, cf=None):  # initialize biases into Detect(), cf is class frequency
        """Initialize detection-head biases per https://arxiv.org/abs/1708.02002 section 3.3."""
        # cf = torch.bincount(torch.tensor(np.concatenate(dataset.labels, 0)[:, 0]).long(), minlength=nc) + 1.
        m = self.model[-1]  # Detect() module
        for mi, s in zip(m.m, m.stride):  # from
            b = mi.bias.view(m.na, -1)  # conv.bias(255) to (3,85)
            b.data[:, 4] += math.log(8 / (640 / s) ** 2)  # obj (8 objects per 640 image)
            b.data[:, 5:] += math.log(0.6 / (m.nc - 0.99)) if cf is None else torch.log(cf / cf.sum())  # cls
            mi.bias = torch.nn.Parameter(b.view(-1), requires_grad=True)
    def _print_biases(self):
        """Print the mean detection-head biases, for debugging."""
        m = self.model[-1]  # Detect() module
        for mi in m.m:  # from
            b = mi.bias.detach().view(m.na, -1).T  # conv.bias(255) to (3,85)
            print(('%6g Conv2d.bias:' + '%10.3g' * 6) % (mi.weight.shape[1], *b[:5].mean(1).tolist(), b[5:].mean()))
    # def _print_weights(self):
    #     for m in self.model.modules():
    #         if type(m) is Bottleneck:
    #             print('%10.3g' % (m.w.detach().sigmoid() * 2))  # shortcut weights
    def fuse(self):  # fuse model Conv2d() + BatchNorm2d() layers
        """Fold BatchNorm into preceding Conv layers in place; returns self."""
        print('Fusing layers... ')
        for m in self.model.modules():
            if type(m) is Conv and hasattr(m, 'bn'):
                m.conv = fuse_conv_and_bn(m.conv, m.bn)  # update conv
                delattr(m, 'bn')  # remove batchnorm
                m.forward = m.fuseforward  # update forward
            elif type(m) is nn.Upsample:
                m.recompute_scale_factor = None  # torch 1.11.0 compatibility
        self.info()
        return self
    def nms(self, mode=True):  # add or remove NMS module
        """Append (mode=True) or strip (mode=False) a trailing NMS module; returns self."""
        present = type(self.model[-1]) is NMS  # last layer is NMS
        if mode and not present:
            print('Adding NMS... ')
            m = NMS()  # module
            m.f = -1  # from
            m.i = self.model[-1].i + 1  # index
            self.model.add_module(name='%s' % m.i, module=m)  # add
            self.eval()
        elif not mode and present:
            print('Removing NMS... ')
            self.model = self.model[:-1]  # remove
        return self
    def autoshape(self):  # add autoShape module
        """Wrap self in autoShape for file/URI/PIL/cv2/np inputs; returns the wrapper."""
        print('Adding autoShape... ')
        m = autoShape(self)  # wrap model
        copy_attr(m, self, include=('yaml', 'nc', 'hyp', 'names', 'stride'), exclude=())  # copy attributes
        return m
    def info(self, verbose=False, img_size=640):  # print model information
        """Log a parameter/GFLOPS summary of the model."""
        model_info(self, verbose, img_size)
The provided code snippet includes necessary dependencies for implementing the `custom` function. Write a Python function `def custom(path_or_model='path/to/model.pt', autoshape=True)` to solve the following problem:
YOLOv5-custom model from https://github.com/ultralytics/yolov5 Arguments (3 options): path_or_model (str): 'path/to/model.pt' path_or_model (dict): torch.load('path/to/model.pt') path_or_model (nn.Module): torch.load('path/to/model.pt')['model'] Returns: pytorch model
Here is the function:
def custom(path_or_model='path/to/model.pt', autoshape=True):
    """YOLOv5-custom model from https://github.com/ultralytics/yolov5

    Arguments (3 options):
        path_or_model (str): 'path/to/model.pt'
        path_or_model (dict): torch.load('path/to/model.pt')
        path_or_model (nn.Module): torch.load('path/to/model.pt')['model']

    Returns:
        pytorch model
    """
    # NOTE(review): torch.load unpickles arbitrary code -- only load trusted checkpoints.
    if isinstance(path_or_model, str):
        checkpoint = torch.load(path_or_model)  # load checkpoint from disk
    else:
        checkpoint = path_or_model
    if isinstance(checkpoint, dict):
        checkpoint = checkpoint['model']  # unwrap the nn.Module entry
    device = next(checkpoint.parameters()).device
    hub_model = Model(checkpoint.yaml).to(device)  # rebuild from yaml config
    hub_model.load_state_dict(checkpoint.float().state_dict())  # copy weights (FP32)
    hub_model.names = checkpoint.names  # class names
    if autoshape:
        hub_model = hub_model.autoshape()
    return hub_model
14,653 | import argparse
import logging
import math
import sys
from copy import deepcopy
from pathlib import Path
import torch
import torch.nn as nn
logger = logging.getLogger(__name__)
from models.common import Conv, Bottleneck, SPP, DWConv, Focus, BottleneckCSP, C3, ShuffleV2Block, Concat, NMS, autoShape, StemBlock, BlazeBlock, DoubleBlazeBlock
from models.experimental import MixConv2d, CrossConv
from utils.autoanchor import check_anchor_order
from utils.general import make_divisible, check_file, set_logging
from utils.torch_utils import time_synchronized, fuse_conv_and_bn, model_info, scale_img, initialize_weights, \
select_device, copy_attr
class Detect(nn.Module):
    """YOLOv5-face detection head.

    Each anchor predicts box xywh (4) + objectness (1) + 5 facial landmarks
    (10 coords) + class scores (nc), i.e. `no = nc + 15` outputs per anchor.
    """
    stride = None  # strides computed during build
    export_cat = False  # onnx export cat output

    def __init__(self, nc=80, anchors=(), ch=()):  # detection layer
        """nc: classes; anchors: per-layer flat (w,h) pairs; ch: input channels per layer."""
        super(Detect, self).__init__()
        self.nc = nc  # number of classes
        # self.no = nc + 5  # plain-YOLOv5 outputs per anchor
        self.no = nc + 5 + 10  # number of outputs per anchor (+10 landmark coords)
        self.nl = len(anchors)  # number of detection layers
        self.na = len(anchors[0]) // 2  # number of anchors
        self.grid = [torch.zeros(1)] * self.nl  # init grid
        a = torch.tensor(anchors).float().view(self.nl, -1, 2)
        self.register_buffer('anchors', a)  # shape(nl,na,2)
        self.register_buffer('anchor_grid', a.clone().view(self.nl, 1, -1, 1, 1, 2))  # shape(nl,1,na,1,1,2)
        self.m = nn.ModuleList(nn.Conv2d(x, self.no * self.na, 1) for x in ch)  # output conv

    def forward(self, x):
        """x: list of nl feature maps. Training returns reshaped raw maps;
        inference returns (decoded predictions, raw maps); export_cat returns
        a single concatenated decoded tensor."""
        z = []  # inference output
        if self.export_cat:
            for i in range(self.nl):
                x[i] = self.m[i](x[i])  # conv
                bs, _, ny, nx = x[i].shape  # x(bs,255,20,20) to x(bs,3,20,20,85)
                x[i] = x[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous()
                if self.grid[i].shape[2:4] != x[i].shape[2:4]:
                    self.grid[i], self.anchor_grid[i] = self._make_grid_new(nx, ny, i)
                # Sigmoid box/obj and class channels; landmark offsets (5:15) stay linear.
                y = torch.full_like(x[i], 0)
                y = y + torch.cat((x[i][:, :, :, :, 0:5].sigmoid(), torch.cat((x[i][:, :, :, :, 5:15], x[i][:, :, :, :, 15:15+self.nc].sigmoid()), 4)), 4)
                box_xy = (y[:, :, :, :, 0:2] * 2. - 0.5 + self.grid[i].to(x[i].device)) * self.stride[i]  # xy
                box_wh = (y[:, :, :, :, 2:4] * 2) ** 2 * self.anchor_grid[i]  # wh
                landm1 = y[:, :, :, :, 5:7] * self.anchor_grid[i] + self.grid[i].to(x[i].device) * self.stride[i]  # landmark x1 y1
                landm2 = y[:, :, :, :, 7:9] * self.anchor_grid[i] + self.grid[i].to(x[i].device) * self.stride[i]  # landmark x2 y2
                landm3 = y[:, :, :, :, 9:11] * self.anchor_grid[i] + self.grid[i].to(x[i].device) * self.stride[i]  # landmark x3 y3
                landm4 = y[:, :, :, :, 11:13] * self.anchor_grid[i] + self.grid[i].to(x[i].device) * self.stride[i]  # landmark x4 y4
                landm5 = y[:, :, :, :, 13:15] * self.anchor_grid[i] + self.grid[i].to(x[i].device) * self.stride[i]  # landmark x5 y5
                y = torch.cat([box_xy, box_wh, y[:, :, :, :, 4:5], landm1, landm2, landm3, landm4, landm5, y[:, :, :, :, 15:15+self.nc]], -1)
                z.append(y.view(bs, -1, self.no))
            return torch.cat(z, 1)
        for i in range(self.nl):
            x[i] = self.m[i](x[i])  # conv
            bs, _, ny, nx = x[i].shape  # x(bs,255,20,20) to x(bs,3,20,20,85)
            x[i] = x[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous()
            if not self.training:  # inference
                if self.grid[i].shape[2:4] != x[i].shape[2:4]:
                    self.grid[i] = self._make_grid(nx, ny).to(x[i].device)
                y = torch.full_like(x[i], 0)
                class_range = list(range(5)) + list(range(15, 15 + self.nc))
                y[..., class_range] = x[i][..., class_range].sigmoid()
                y[..., 5:15] = x[i][..., 5:15]  # landmark offsets stay un-squashed
                y[..., 0:2] = (y[..., 0:2] * 2. - 0.5 + self.grid[i].to(x[i].device)) * self.stride[i]  # xy
                y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i]  # wh
                y[..., 5:7] = y[..., 5:7] * self.anchor_grid[i] + self.grid[i].to(x[i].device) * self.stride[i]  # landmark x1 y1
                y[..., 7:9] = y[..., 7:9] * self.anchor_grid[i] + self.grid[i].to(x[i].device) * self.stride[i]  # landmark x2 y2
                y[..., 9:11] = y[..., 9:11] * self.anchor_grid[i] + self.grid[i].to(x[i].device) * self.stride[i]  # landmark x3 y3
                y[..., 11:13] = y[..., 11:13] * self.anchor_grid[i] + self.grid[i].to(x[i].device) * self.stride[i]  # landmark x4 y4
                y[..., 13:15] = y[..., 13:15] * self.anchor_grid[i] + self.grid[i].to(x[i].device) * self.stride[i]  # landmark x5 y5
                z.append(y.view(bs, -1, self.no))
        return x if self.training else (torch.cat(z, 1), x)

    # FIX: decorated as @staticmethod. This is invoked as self._make_grid(nx, ny)
    # in forward(); without the decorator the bound call would pass `self` as nx
    # and crash on the first inference.
    @staticmethod
    def _make_grid(nx=20, ny=20):
        yv, xv = torch.meshgrid([torch.arange(ny), torch.arange(nx)])
        return torch.stack((xv, yv), 2).view((1, 1, ny, nx, 2)).float()

    def _make_grid_new(self, nx=20, ny=20, i=0):
        """Build (grid, anchor_grid) on the anchors' device (export path)."""
        d = self.anchors[i].device
        # NOTE(review): substring version test only matches torch 1.10.0 exactly;
        # newer releases also accept `indexing=` -- consider a proper version parse.
        if '1.10.0' in torch.__version__:  # torch>=1.10.0 meshgrid workaround for torch>=0.7 compatibility
            yv, xv = torch.meshgrid([torch.arange(ny).to(d), torch.arange(nx).to(d)], indexing='ij')
        else:
            yv, xv = torch.meshgrid([torch.arange(ny).to(d), torch.arange(nx).to(d)])
        grid = torch.stack((xv, yv), 2).expand((1, self.na, ny, nx, 2)).float()
        anchor_grid = (self.anchors[i].clone() * self.stride[i]).view((1, self.na, 1, 1, 2)).expand((1, self.na, ny, nx, 2)).float()
        return grid, anchor_grid
from thop import profile
from thop import clever_format
def DWConv(c1, c2, k=1, s=1, act=True):
    """Depthwise convolution: a Conv whose group count is gcd(c1, c2)."""
    groups = math.gcd(c1, c2)
    return Conv(c1, c2, k, s, g=groups, act=act)
class Conv(nn.Module):
    """Standard convolution block: Conv2d -> BatchNorm2d -> activation."""

    def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True):  # ch_in, ch_out, kernel, stride, padding, groups
        super(Conv, self).__init__()
        self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g, bias=False)
        self.bn = nn.BatchNorm2d(c2)
        # act=True -> SiLU; an nn.Module -> used as given; anything else -> identity.
        if act is True:
            self.act = nn.SiLU()
        elif isinstance(act, nn.Module):
            self.act = act
        else:
            self.act = nn.Identity()

    def forward(self, x):
        """Apply convolution, batch-norm, then activation."""
        out = self.conv(x)
        out = self.bn(out)
        return self.act(out)

    def fuseforward(self, x):
        """Forward for a fused conv (batch-norm folded into conv weights)."""
        return self.act(self.conv(x))
class StemBlock(nn.Module):
    """Stem: a strided conv followed by two parallel downsampling branches
    (1x1-squeeze/3x3-stride-2 conv vs. max-pool) fused by a 1x1 conv."""

    def __init__(self, c1, c2, k=3, s=2, p=None, g=1, act=True):
        super(StemBlock, self).__init__()
        self.stem_1 = Conv(c1, c2, k, s, p, g, act)
        self.stem_2a = Conv(c2, c2 // 2, 1, 1, 0)
        self.stem_2b = Conv(c2 // 2, c2, 3, 2, 1)
        self.stem_2p = nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True)
        self.stem_3 = Conv(c2 * 2, c2, 1, 1, 0)

    def forward(self, x):
        base = self.stem_1(x)
        conv_branch = self.stem_2b(self.stem_2a(base))  # squeeze then strided conv
        pool_branch = self.stem_2p(base)                # parallel max-pool downsample
        return self.stem_3(torch.cat((conv_branch, pool_branch), 1))
class Bottleneck(nn.Module):
    """Standard bottleneck: 1x1 reduce -> 3x3 conv, with optional residual add."""

    def __init__(self, c1, c2, shortcut=True, g=1, e=0.5):  # ch_in, ch_out, shortcut, groups, expansion
        super(Bottleneck, self).__init__()
        hidden = int(c2 * e)  # hidden channels
        self.cv1 = Conv(c1, hidden, 1, 1)
        self.cv2 = Conv(hidden, c2, 3, 1, g=g)
        self.add = shortcut and c1 == c2  # residual only when channel counts match

    def forward(self, x):
        y = self.cv2(self.cv1(x))
        if self.add:
            y = x + y
        return y
class BottleneckCSP(nn.Module):
    """CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks"""

    def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):  # ch_in, ch_out, number, shortcut, groups, expansion
        super(BottleneckCSP, self).__init__()
        c_ = int(c2 * e)  # hidden channels
        self.cv1 = Conv(c1, c_, 1, 1)
        self.cv2 = nn.Conv2d(c1, c_, 1, 1, bias=False)
        self.cv3 = nn.Conv2d(c_, c_, 1, 1, bias=False)
        self.cv4 = Conv(2 * c_, c2, 1, 1)
        self.bn = nn.BatchNorm2d(2 * c_)  # applied to cat(cv2, cv3)
        self.act = nn.LeakyReLU(0.1, inplace=True)
        blocks = [Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)]
        self.m = nn.Sequential(*blocks)

    def forward(self, x):
        main = self.cv3(self.m(self.cv1(x)))  # processed branch
        bypass = self.cv2(x)                  # cross-stage shortcut branch
        fused = torch.cat((main, bypass), dim=1)
        return self.cv4(self.act(self.bn(fused)))
class C3(nn.Module):
    """CSP Bottleneck with 3 convolutions."""

    def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):  # ch_in, ch_out, number, shortcut, groups, expansion
        super(C3, self).__init__()
        c_ = int(c2 * e)  # hidden channels
        self.cv1 = Conv(c1, c_, 1, 1)
        self.cv2 = Conv(c1, c_, 1, 1)
        self.cv3 = Conv(2 * c_, c2, 1)  # act=FReLU(c2)
        blocks = [Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)]
        self.m = nn.Sequential(*blocks)

    def forward(self, x):
        main = self.m(self.cv1(x))  # bottleneck chain branch
        bypass = self.cv2(x)        # plain 1x1 branch
        return self.cv3(torch.cat((main, bypass), dim=1))
class ShuffleV2Block(nn.Module):
    """ShuffleNetV2 unit: two branches concatenated, then channel shuffle.

    With stride 1 the input is split channel-wise and only half is processed;
    with stride > 1 both branches downsample the full input.
    """

    def __init__(self, inp, oup, stride):
        super(ShuffleV2Block, self).__init__()
        if not (1 <= stride <= 3):
            raise ValueError('illegal stride value')
        self.stride = stride
        branch_features = oup // 2
        # Stride-1 blocks must be able to split the input exactly in half.
        assert (self.stride != 1) or (inp == branch_features << 1)
        if self.stride > 1:
            self.branch1 = nn.Sequential(
                self.depthwise_conv(inp, inp, kernel_size=3, stride=self.stride, padding=1),
                nn.BatchNorm2d(inp),
                nn.Conv2d(inp, branch_features, kernel_size=1, stride=1, padding=0, bias=False),
                nn.BatchNorm2d(branch_features),
                nn.SiLU(),
            )
        else:
            self.branch1 = nn.Sequential()
        self.branch2 = nn.Sequential(
            nn.Conv2d(inp if (self.stride > 1) else branch_features, branch_features, kernel_size=1, stride=1, padding=0, bias=False),
            nn.BatchNorm2d(branch_features),
            nn.SiLU(),
            self.depthwise_conv(branch_features, branch_features, kernel_size=3, stride=self.stride, padding=1),
            nn.BatchNorm2d(branch_features),
            nn.Conv2d(branch_features, branch_features, kernel_size=1, stride=1, padding=0, bias=False),
            nn.BatchNorm2d(branch_features),
            nn.SiLU(),
        )

    # FIX: decorated as @staticmethod. It is called as self.depthwise_conv(...)
    # in __init__; without the decorator the bound call would pass the module
    # itself as `i` (in_channels) and crash when constructing the block.
    @staticmethod
    def depthwise_conv(i, o, kernel_size, stride=1, padding=0, bias=False):
        """Grouped conv with groups == in_channels, i.e. depthwise."""
        return nn.Conv2d(i, o, kernel_size, stride, padding, bias=bias, groups=i)

    def forward(self, x):
        if self.stride == 1:
            x1, x2 = x.chunk(2, dim=1)  # split channels; x1 passes through untouched
            out = torch.cat((x1, self.branch2(x2)), dim=1)
        else:
            out = torch.cat((self.branch1(x), self.branch2(x)), dim=1)
        out = channel_shuffle(out, 2)  # mix channels across the two branches
        return out
class BlazeBlock(nn.Module):
    """Single blaze block: 5x5 depthwise + 1x1 pointwise conv with a residual
    connection (max-pool + 1x1 projection shortcut when downsampling)."""

    def __init__(self, in_channels, out_channels, mid_channels=None, stride=1):
        super(BlazeBlock, self).__init__()
        mid_channels = mid_channels or in_channels
        assert stride in [1, 2]
        self.use_pool = stride > 1  # downsampling needs a projection shortcut
        self.branch1 = nn.Sequential(
            nn.Conv2d(in_channels=in_channels, out_channels=mid_channels, kernel_size=5, stride=stride, padding=2, groups=in_channels),
            nn.BatchNorm2d(mid_channels),
            nn.Conv2d(in_channels=mid_channels, out_channels=out_channels, kernel_size=1, stride=1),
            nn.BatchNorm2d(out_channels),
        )
        if self.use_pool:
            self.shortcut = nn.Sequential(
                nn.MaxPool2d(kernel_size=stride, stride=stride),
                nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=1, stride=1),
                nn.BatchNorm2d(out_channels),
            )
        self.relu = nn.SiLU(inplace=True)

    def forward(self, x):
        main = self.branch1(x)
        residual = self.shortcut(x) if self.use_pool else x
        return self.relu(main + residual)
class DoubleBlazeBlock(nn.Module):
    """Double blaze block: two stacked 5x5-depthwise/1x1-pointwise stages with
    a residual connection (pool + projection shortcut when downsampling)."""

    def __init__(self, in_channels, out_channels, mid_channels=None, stride=1):
        super(DoubleBlazeBlock, self).__init__()
        mid_channels = mid_channels or in_channels
        assert stride in [1, 2]
        self.use_pool = stride > 1  # downsampling needs a projection shortcut
        self.branch1 = nn.Sequential(
            nn.Conv2d(in_channels=in_channels, out_channels=in_channels, kernel_size=5, stride=stride, padding=2, groups=in_channels),
            nn.BatchNorm2d(in_channels),
            nn.Conv2d(in_channels=in_channels, out_channels=mid_channels, kernel_size=1, stride=1),
            nn.BatchNorm2d(mid_channels),
            nn.SiLU(inplace=True),
            nn.Conv2d(in_channels=mid_channels, out_channels=mid_channels, kernel_size=5, stride=1, padding=2),
            nn.BatchNorm2d(mid_channels),
            nn.Conv2d(in_channels=mid_channels, out_channels=out_channels, kernel_size=1, stride=1),
            nn.BatchNorm2d(out_channels),
        )
        if self.use_pool:
            self.shortcut = nn.Sequential(
                nn.MaxPool2d(kernel_size=stride, stride=stride),
                nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=1, stride=1),
                nn.BatchNorm2d(out_channels),
            )
        self.relu = nn.SiLU(inplace=True)

    def forward(self, x):
        main = self.branch1(x)
        residual = self.shortcut(x) if self.use_pool else x
        return self.relu(main + residual)
class SPP(nn.Module):
    """Spatial pyramid pooling layer used in YOLOv3-SPP."""

    def __init__(self, c1, c2, k=(5, 9, 13)):
        super(SPP, self).__init__()
        c_ = c1 // 2  # hidden channels
        self.cv1 = Conv(c1, c_, 1, 1)
        self.cv2 = Conv(c_ * (len(k) + 1), c2, 1, 1)
        pools = [nn.MaxPool2d(kernel_size=size, stride=1, padding=size // 2) for size in k]
        self.m = nn.ModuleList(pools)

    def forward(self, x):
        x = self.cv1(x)
        pooled = [pool(x) for pool in self.m]  # multi-scale poolings, same spatial size
        return self.cv2(torch.cat([x] + pooled, 1))
class Focus(nn.Module):
    """Focus wh information into c-space: 2x2 space-to-depth followed by a conv."""

    def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True):  # ch_in, ch_out, kernel, stride, padding, groups
        super(Focus, self).__init__()
        self.conv = Conv(c1 * 4, c2, k, s, p, g, act)

    def forward(self, x):  # x(b,c,w,h) -> y(b,4c,w/2,h/2)
        # Sample the four pixel-parity sub-grids and stack them on channels.
        ee = x[..., ::2, ::2]
        oe = x[..., 1::2, ::2]
        eo = x[..., ::2, 1::2]
        oo = x[..., 1::2, 1::2]
        return self.conv(torch.cat([ee, oe, eo, oo], 1))
class Concat(nn.Module):
    """Concatenate a list of tensors along a configurable dimension."""

    def __init__(self, dimension=1):
        super(Concat, self).__init__()
        self.d = dimension  # concatenation axis

    def forward(self, x):
        """x: list/tuple of tensors; returns their concatenation along self.d."""
        return torch.cat(x, self.d)
class CrossConv(nn.Module):
    """Cross Convolution Downsample: a (1,k) conv followed by a (k,1) conv."""

    def __init__(self, c1, c2, k=3, s=1, g=1, e=1.0, shortcut=False):
        # ch_in, ch_out, kernel, stride, groups, expansion, shortcut
        super(CrossConv, self).__init__()
        hidden = int(c2 * e)  # hidden channels
        self.cv1 = Conv(c1, hidden, (1, k), (1, s))
        self.cv2 = Conv(hidden, c2, (k, 1), (s, 1), g=g)
        self.add = shortcut and c1 == c2  # residual only when channel counts match

    def forward(self, x):
        out = self.cv2(self.cv1(x))
        return x + out if self.add else out
class MixConv2d(nn.Module):
    """Mixed Depthwise Conv https://arxiv.org/abs/1907.09595"""

    def __init__(self, c1, c2, k=(1, 3), s=1, equal_ch=True):
        super(MixConv2d, self).__init__()
        groups = len(k)
        if equal_ch:  # equal c_ per group
            i = torch.linspace(0, groups - 1E-6, c2).floor()  # c2 indices
            c_ = [(i == g).sum() for g in range(groups)]  # intermediate channels
        else:  # equal weight.numel() per group
            b = [c2] + [0] * groups
            a = np.eye(groups + 1, groups, k=-1)
            a -= np.roll(a, 1, axis=1)
            a *= np.array(k) ** 2
            a[0] = 1
            c_ = np.linalg.lstsq(a, b, rcond=None)[0].round()  # solve for equal weight indices, ax = b
        convs = [nn.Conv2d(c1, int(c_[g]), k[g], s, k[g] // 2, bias=False) for g in range(groups)]
        self.m = nn.ModuleList(convs)
        self.bn = nn.BatchNorm2d(c2)
        self.act = nn.LeakyReLU(0.1, inplace=True)

    def forward(self, x):
        # NOTE(review): the residual add assumes c1 == c2 -- confirm at call sites.
        mixed = torch.cat([conv(x) for conv in self.m], 1)
        return x + self.act(self.bn(mixed))
def make_divisible(x, divisor):
    """Return the smallest multiple of `divisor` that is >= x."""
    quotient = math.ceil(x / divisor)
    return quotient * divisor
def parse_model(d, ch):  # model_dict, input_channels(3)
    """Build an nn.Sequential model from a parsed yaml model dict.

    Args:
        d: dict with 'anchors', 'nc', 'depth_multiple', 'width_multiple',
           and 'backbone'/'head' layer lists.
        ch: list seeded with the input channel count; extended in place with
            each layer's output channels.

    Returns:
        (model, save): the nn.Sequential of layers and the sorted list of
        layer indices whose outputs must be kept for later layers.
    """
    logger.info('\n%3s%18s%3s%10s %-40s%-30s' % ('', 'from', 'n', 'params', 'module', 'arguments'))
    anchors, nc, gd, gw = d['anchors'], d['nc'], d['depth_multiple'], d['width_multiple']
    na = (len(anchors[0]) // 2) if isinstance(anchors, list) else anchors  # number of anchors
    no = na * (nc + 5)  # number of outputs = anchors * (classes + 5)
    layers, save, c2 = [], [], ch[-1]  # layers, savelist, ch out
    for i, (f, n, m, args) in enumerate(d['backbone'] + d['head']):  # from, number, module, args
        # NOTE(review): eval() on yaml-supplied strings executes arbitrary code --
        # only load trusted model configs.
        m = eval(m) if isinstance(m, str) else m  # eval strings
        for j, a in enumerate(args):
            try:
                args[j] = eval(a) if isinstance(a, str) else a  # eval strings
            except:
                pass
        n = max(round(n * gd), 1) if n > 1 else n  # depth gain
        if m in [Conv, Bottleneck, SPP, DWConv, MixConv2d, Focus, CrossConv, BottleneckCSP, C3, ShuffleV2Block, StemBlock, BlazeBlock, DoubleBlazeBlock]:
            c1, c2 = ch[f], args[0]
            # Normal
            # if i > 0 and args[0] != no:  # channel expansion factor
            #     ex = 1.75  # exponential (default 2.0)
            #     e = math.log(c2 / ch[1]) / math.log(2)
            #     c2 = int(ch[1] * ex ** e)
            # if m != Focus:
            # Scale output channels by width_multiple, except for the final output layer.
            c2 = make_divisible(c2 * gw, 8) if c2 != no else c2
            # Experimental
            # if i > 0 and args[0] != no:  # channel expansion factor
            #     ex = 1 + gw  # exponential (default 2.0)
            #     ch1 = 32  # ch[1]
            #     e = math.log(c2 / ch1) / math.log(2)  # level 1-n
            #     c2 = int(ch1 * ex ** e)
            # if m != Focus:
            #     c2 = make_divisible(c2, 8) if c2 != no else c2
            args = [c1, c2, *args[1:]]
            if m in [BottleneckCSP, C3]:
                args.insert(2, n)  # repeat count becomes the module's own arg
                n = 1
        elif m is nn.BatchNorm2d:
            args = [ch[f]]
        elif m is Concat:
            c2 = sum([ch[-1 if x == -1 else x + 1] for x in f])  # sum of source channels
        elif m is Detect:
            args.append([ch[x + 1] for x in f])  # per-layer input channels
            if isinstance(args[1], int):  # number of anchors
                args[1] = [list(range(args[1] * 2))] * len(f)
        else:
            c2 = ch[f]
        m_ = nn.Sequential(*[m(*args) for _ in range(n)]) if n > 1 else m(*args)  # module
        t = str(m)[8:-2].replace('__main__.', '')  # module type
        np = sum([x.numel() for x in m_.parameters()])  # number params
        m_.i, m_.f, m_.type, m_.np = i, f, t, np  # attach index, 'from' index, type, number params
        logger.info('%3s%18s%3s%10.0f  %-40s%-30s' % (i, f, n, np, t, args))  # print
        save.extend(x % i for x in ([f] if isinstance(f, int) else f) if x != -1)  # append to savelist
        layers.append(m_)
        ch.append(c2)
    return nn.Sequential(*layers), sorted(save)
14,654 | import math
import numpy as np
import requests
import torch
import torch.nn as nn
from PIL import Image, ImageDraw
from utils.datasets import letterbox
from utils.general import non_max_suppression, make_divisible, scale_coords, xyxy2xywh
from utils.plots import color_list
def autopad(k, p=None):  # kernel, padding
    """Return 'same' padding for kernel size k unless p is given explicitly."""
    if p is not None:
        return p
    if isinstance(k, int):
        return k // 2
    return [x // 2 for x in k]  # per-dimension padding for tuple/list kernels
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.