import logging
import os
import sys

import cv2
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from pydantic import BaseModel

logger = logging.getLogger(__name__)


class BoundingBox(BaseModel):
    x1: int
    y1: int
    x2: int
    y2: int
    cls_id: int
    conf: float


class TVFrameResult(BaseModel):
    frame_id: int
    boxes: list[BoundingBox]
    keypoints: list[tuple[int, int]]


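# A minimal usage sketch for the two pydantic models above; the concrete
# values here are made up for illustration.
def _example_frame_result() -> TVFrameResult:
    # pydantic validates and coerces the field types on construction.
    box = BoundingBox(x1=10, y1=20, x2=110, y2=220, cls_id=0, conf=0.93)
    return TVFrameResult(frame_id=0, boxes=[box], keypoints=[(480, 270)])

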
BatchNorm2d = nn.BatchNorm2d
BN_MOMENTUM = 0.1


def conv3x3(in_planes, out_planes, stride=1):
    """3x3 convolution with padding"""
    return nn.Conv2d(in_planes, out_planes, kernel_size=3,
                     stride=stride, padding=1, bias=False)


class BasicBlock(nn.Module):
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super().__init__()
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = BatchNorm2d(planes, momentum=BN_MOMENTUM)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = BatchNorm2d(planes, momentum=BN_MOMENTUM)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        residual = x

        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.bn2(out)

        if self.downsample is not None:
            residual = self.downsample(x)

        out += residual
        out = self.relu(out)

        return out


class Bottleneck(nn.Module):
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super().__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = BatchNorm2d(planes, momentum=BN_MOMENTUM)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = BatchNorm2d(planes, momentum=BN_MOMENTUM)
        self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1,
                               bias=False)
        self.bn3 = BatchNorm2d(planes * self.expansion,
                               momentum=BN_MOMENTUM)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        residual = x

        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)

        out = self.conv3(out)
        out = self.bn3(out)

        if self.downsample is not None:
            residual = self.downsample(x)

        out += residual
        out = self.relu(out)

        return out


class HighResolutionModule(nn.Module):
    def __init__(self, num_branches, blocks, num_blocks, num_inchannels,
                 num_channels, fuse_method, multi_scale_output=True):
        super().__init__()
        self._check_branches(
            num_branches, blocks, num_blocks, num_inchannels, num_channels)

        self.num_inchannels = num_inchannels
        self.fuse_method = fuse_method
        self.num_branches = num_branches

        self.multi_scale_output = multi_scale_output

        self.branches = self._make_branches(
            num_branches, blocks, num_blocks, num_channels)
        self.fuse_layers = self._make_fuse_layers()
        self.relu = nn.ReLU(inplace=True)

    def _check_branches(self, num_branches, blocks, num_blocks,
                        num_inchannels, num_channels):
        if num_branches != len(num_blocks):
            error_msg = 'NUM_BRANCHES({}) != NUM_BLOCKS({})'.format(
                num_branches, len(num_blocks))
            logger.error(error_msg)
            raise ValueError(error_msg)

        if num_branches != len(num_channels):
            error_msg = 'NUM_BRANCHES({}) != NUM_CHANNELS({})'.format(
                num_branches, len(num_channels))
            logger.error(error_msg)
            raise ValueError(error_msg)

        if num_branches != len(num_inchannels):
            error_msg = 'NUM_BRANCHES({}) != NUM_INCHANNELS({})'.format(
                num_branches, len(num_inchannels))
            logger.error(error_msg)
            raise ValueError(error_msg)

    def _make_one_branch(self, branch_index, block, num_blocks, num_channels,
                         stride=1):
        downsample = None
        if stride != 1 or \
                self.num_inchannels[branch_index] != num_channels[branch_index] * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.num_inchannels[branch_index],
                          num_channels[branch_index] * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                BatchNorm2d(num_channels[branch_index] * block.expansion,
                            momentum=BN_MOMENTUM),
            )

        layers = []
        layers.append(block(self.num_inchannels[branch_index],
                            num_channels[branch_index], stride, downsample))
        self.num_inchannels[branch_index] = \
            num_channels[branch_index] * block.expansion
        for i in range(1, num_blocks[branch_index]):
            layers.append(block(self.num_inchannels[branch_index],
                                num_channels[branch_index]))

        return nn.Sequential(*layers)

    def _make_branches(self, num_branches, block, num_blocks, num_channels):
        branches = []

        for i in range(num_branches):
            branches.append(
                self._make_one_branch(i, block, num_blocks, num_channels))

        return nn.ModuleList(branches)

    def _make_fuse_layers(self):
        if self.num_branches == 1:
            return None

        num_branches = self.num_branches
        num_inchannels = self.num_inchannels
        fuse_layers = []
        for i in range(num_branches if self.multi_scale_output else 1):
            fuse_layer = []
            for j in range(num_branches):
                if j > i:
                    fuse_layer.append(nn.Sequential(
                        nn.Conv2d(num_inchannels[j],
                                  num_inchannels[i],
                                  1,
                                  1,
                                  0,
                                  bias=False),
                        BatchNorm2d(num_inchannels[i], momentum=BN_MOMENTUM)))
                elif j == i:
                    fuse_layer.append(None)
                else:
                    conv3x3s = []
                    for k in range(i - j):
                        if k == i - j - 1:
                            num_outchannels_conv3x3 = num_inchannels[i]
                            conv3x3s.append(nn.Sequential(
                                nn.Conv2d(num_inchannels[j],
                                          num_outchannels_conv3x3,
                                          3, 2, 1, bias=False),
                                BatchNorm2d(num_outchannels_conv3x3,
                                            momentum=BN_MOMENTUM)))
                        else:
                            num_outchannels_conv3x3 = num_inchannels[j]
                            conv3x3s.append(nn.Sequential(
                                nn.Conv2d(num_inchannels[j],
                                          num_outchannels_conv3x3,
                                          3, 2, 1, bias=False),
                                BatchNorm2d(num_outchannels_conv3x3,
                                            momentum=BN_MOMENTUM),
                                nn.ReLU(inplace=True)))
                    fuse_layer.append(nn.Sequential(*conv3x3s))
            fuse_layers.append(nn.ModuleList(fuse_layer))

        return nn.ModuleList(fuse_layers)

    def get_num_inchannels(self):
        return self.num_inchannels

    def forward(self, x):
        if self.num_branches == 1:
            return [self.branches[0](x[0])]

        for i in range(self.num_branches):
            x[i] = self.branches[i](x[i])

        x_fuse = []
        for i in range(len(self.fuse_layers)):
            y = x[0] if i == 0 else self.fuse_layers[i][0](x[0])
            for j in range(1, self.num_branches):
                if i == j:
                    y = y + x[j]
                elif j > i:
                    y = y + F.interpolate(
                        self.fuse_layers[i][j](x[j]),
                        size=[x[i].shape[2], x[i].shape[3]],
                        mode='bilinear', align_corners=False)
                else:
                    y = y + self.fuse_layers[i][j](x[j])
            x_fuse.append(self.relu(y))

        return x_fuse


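# A minimal smoke test for HighResolutionModule, assuming two branches of
# BasicBlocks at full and half resolution; the tensor sizes are arbitrary.
def _example_hr_module():
    module = HighResolutionModule(
        num_branches=2, blocks=BasicBlock, num_blocks=[2, 2],
        num_inchannels=[32, 64], num_channels=[32, 64], fuse_method='SUM')
    x = [torch.randn(1, 32, 64, 64), torch.randn(1, 64, 32, 32)]
    fused = module(x)
    # Each output keeps its branch's resolution after cross-scale fusion.
    assert [f.shape[1] for f in fused] == [32, 64]
    return fused

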
blocks_dict = {
    'BASIC': BasicBlock,
    'BOTTLENECK': Bottleneck
}


class HighResolutionNet(nn.Module):

    def __init__(self, config, **kwargs):
        super().__init__()
        self.inplanes = 64
        extra = config['MODEL']['EXTRA']

        self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=3, stride=2,
                               padding=1, bias=False)
        self.bn1 = BatchNorm2d(self.inplanes, momentum=BN_MOMENTUM)
        self.conv2 = nn.Conv2d(self.inplanes, self.inplanes, kernel_size=3,
                               stride=2, padding=1, bias=False)
        self.bn2 = BatchNorm2d(self.inplanes, momentum=BN_MOMENTUM)
        self.relu = nn.ReLU(inplace=True)
        self.sf = nn.Softmax(dim=1)
        self.layer1 = self._make_layer(Bottleneck, 64, 64, 4)

        self.stage2_cfg = extra['STAGE2']
        num_channels = self.stage2_cfg['NUM_CHANNELS']
        block = blocks_dict[self.stage2_cfg['BLOCK']]
        num_channels = [
            num_channels[i] * block.expansion for i in range(len(num_channels))]
        self.transition1 = self._make_transition_layer(
            [256], num_channels)
        self.stage2, pre_stage_channels = self._make_stage(
            self.stage2_cfg, num_channels)

        self.stage3_cfg = extra['STAGE3']
        num_channels = self.stage3_cfg['NUM_CHANNELS']
        block = blocks_dict[self.stage3_cfg['BLOCK']]
        num_channels = [
            num_channels[i] * block.expansion for i in range(len(num_channels))]
        self.transition2 = self._make_transition_layer(
            pre_stage_channels, num_channels)
        self.stage3, pre_stage_channels = self._make_stage(
            self.stage3_cfg, num_channels)

        self.stage4_cfg = extra['STAGE4']
        num_channels = self.stage4_cfg['NUM_CHANNELS']
        block = blocks_dict[self.stage4_cfg['BLOCK']]
        num_channels = [
            num_channels[i] * block.expansion for i in range(len(num_channels))]
        self.transition3 = self._make_transition_layer(
            pre_stage_channels, num_channels)
        self.stage4, pre_stage_channels = self._make_stage(
            self.stage4_cfg, num_channels, multi_scale_output=True)

        self.upsample = nn.Upsample(scale_factor=2, mode='nearest')
        final_inp_channels = sum(pre_stage_channels) + self.inplanes

        self.head = nn.Sequential(nn.Sequential(
            nn.Conv2d(
                in_channels=final_inp_channels,
                out_channels=final_inp_channels,
                kernel_size=1),
            BatchNorm2d(final_inp_channels, momentum=BN_MOMENTUM),
            nn.ReLU(inplace=True),
            nn.Conv2d(
                in_channels=final_inp_channels,
                out_channels=config['MODEL']['NUM_JOINTS'],
                kernel_size=extra['FINAL_CONV_KERNEL'],
                # keep the spatial size when a 3x3 final conv is configured
                padding=1 if extra['FINAL_CONV_KERNEL'] == 3 else 0),
            nn.Softmax(dim=1)))

    def _make_head(self, x, x_skip):
        x = self.upsample(x)
        x = torch.cat([x, x_skip], dim=1)
        x = self.head(x)

        return x

    def _make_transition_layer(
            self, num_channels_pre_layer, num_channels_cur_layer):
        num_branches_cur = len(num_channels_cur_layer)
        num_branches_pre = len(num_channels_pre_layer)

        transition_layers = []
        for i in range(num_branches_cur):
            if i < num_branches_pre:
                if num_channels_cur_layer[i] != num_channels_pre_layer[i]:
                    transition_layers.append(nn.Sequential(
                        nn.Conv2d(num_channels_pre_layer[i],
                                  num_channels_cur_layer[i],
                                  3,
                                  1,
                                  1,
                                  bias=False),
                        BatchNorm2d(
                            num_channels_cur_layer[i], momentum=BN_MOMENTUM),
                        nn.ReLU(inplace=True)))
                else:
                    transition_layers.append(None)
            else:
                conv3x3s = []
                for j in range(i + 1 - num_branches_pre):
                    inchannels = num_channels_pre_layer[-1]
                    outchannels = num_channels_cur_layer[i] \
                        if j == i - num_branches_pre else inchannels
                    conv3x3s.append(nn.Sequential(
                        nn.Conv2d(
                            inchannels, outchannels, 3, 2, 1, bias=False),
                        BatchNorm2d(outchannels, momentum=BN_MOMENTUM),
                        nn.ReLU(inplace=True)))
                transition_layers.append(nn.Sequential(*conv3x3s))

        return nn.ModuleList(transition_layers)

    def _make_layer(self, block, inplanes, planes, blocks, stride=1):
        downsample = None
        if stride != 1 or inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                BatchNorm2d(planes * block.expansion, momentum=BN_MOMENTUM),
            )

        layers = []
        layers.append(block(inplanes, planes, stride, downsample))
        inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(inplanes, planes))

        return nn.Sequential(*layers)

    def _make_stage(self, layer_config, num_inchannels,
                    multi_scale_output=True):
        num_modules = layer_config['NUM_MODULES']
        num_branches = layer_config['NUM_BRANCHES']
        num_blocks = layer_config['NUM_BLOCKS']
        num_channels = layer_config['NUM_CHANNELS']
        block = blocks_dict[layer_config['BLOCK']]
        fuse_method = layer_config['FUSE_METHOD']

        modules = []
        for i in range(num_modules):
            # multi_scale_output only affects the last module of the stage
            if not multi_scale_output and i == num_modules - 1:
                reset_multi_scale_output = False
            else:
                reset_multi_scale_output = True
            modules.append(
                HighResolutionModule(num_branches,
                                     block,
                                     num_blocks,
                                     num_inchannels,
                                     num_channels,
                                     fuse_method,
                                     reset_multi_scale_output)
            )
            num_inchannels = modules[-1].get_num_inchannels()

        return nn.Sequential(*modules), num_inchannels

    def forward(self, x):
        x = self.conv1(x)
        x_skip = x.clone()
        x = self.bn1(x)
        x = self.relu(x)
        x = self.conv2(x)
        x = self.bn2(x)
        x = self.relu(x)
        x = self.layer1(x)

        x_list = []
        for i in range(self.stage2_cfg['NUM_BRANCHES']):
            if self.transition1[i] is not None:
                x_list.append(self.transition1[i](x))
            else:
                x_list.append(x)
        y_list = self.stage2(x_list)

        x_list = []
        for i in range(self.stage3_cfg['NUM_BRANCHES']):
            if self.transition2[i] is not None:
                x_list.append(self.transition2[i](y_list[-1]))
            else:
                x_list.append(y_list[i])
        y_list = self.stage3(x_list)

        x_list = []
        for i in range(self.stage4_cfg['NUM_BRANCHES']):
            if self.transition3[i] is not None:
                x_list.append(self.transition3[i](y_list[-1]))
            else:
                x_list.append(y_list[i])
        x = self.stage4(x_list)

        # Upsample every branch to the highest resolution and concatenate.
        height, width = x[0].size(2), x[0].size(3)
        x1 = F.interpolate(x[1], size=(height, width), mode='bilinear',
                           align_corners=False)
        x2 = F.interpolate(x[2], size=(height, width), mode='bilinear',
                           align_corners=False)
        x3 = F.interpolate(x[3], size=(height, width), mode='bilinear',
                           align_corners=False)
        x = torch.cat([x[0], x1, x2, x3], 1)
        x = self._make_head(x, x_skip)

        return x

    def init_weights(self, pretrained=''):
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out',
                                        nonlinearity='relu')
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
        if pretrained != '':
            if os.path.isfile(pretrained):
                pretrained_dict = torch.load(pretrained, map_location='cpu')
                model_dict = self.state_dict()
                pretrained_dict = {k: v for k, v in pretrained_dict.items()
                                   if k in model_dict}
                model_dict.update(pretrained_dict)
                self.load_state_dict(model_dict)
            else:
                sys.exit(f'Weights {pretrained} not found.')


def get_cls_net(config, pretrained='', **kwargs):
    """Create keypoint detection model with softmax activation"""
    model = HighResolutionNet(config, **kwargs)
    model.init_weights(pretrained)
    return model


def get_cls_net_l(config, pretrained='', **kwargs):
    """Create line detection model with sigmoid activation"""
    model = HighResolutionNet(config, **kwargs)
    model.init_weights(pretrained)

    # The head is an outer Sequential wrapping one inner Sequential; index 4
    # of the inner block is the final Softmax, which is swapped for a Sigmoid
    # so each line channel is predicted independently.
    model.head[0][4] = nn.Sigmoid()

    return model


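# A minimal sketch of the config dict HighResolutionNet expects, inferred
# from the keys read in __init__; the channel widths, block counts, and
# NUM_JOINTS below are illustrative assumptions, not canonical values.
def _example_build_models():
    example_config = {
        'MODEL': {
            'NUM_JOINTS': 58,  # assumption: 57 keypoint channels + 1 background
            'EXTRA': {
                'FINAL_CONV_KERNEL': 1,
                'STAGE2': {'NUM_MODULES': 1, 'NUM_BRANCHES': 2,
                           'BLOCK': 'BASIC', 'NUM_BLOCKS': [4, 4],
                           'NUM_CHANNELS': [48, 96], 'FUSE_METHOD': 'SUM'},
                'STAGE3': {'NUM_MODULES': 1, 'NUM_BRANCHES': 3,
                           'BLOCK': 'BASIC', 'NUM_BLOCKS': [4, 4, 4],
                           'NUM_CHANNELS': [48, 96, 192], 'FUSE_METHOD': 'SUM'},
                'STAGE4': {'NUM_MODULES': 1, 'NUM_BRANCHES': 4,
                           'BLOCK': 'BASIC', 'NUM_BLOCKS': [4, 4, 4, 4],
                           'NUM_CHANNELS': [48, 96, 192, 384],
                           'FUSE_METHOD': 'SUM'},
            },
        },
    }
    # Without a weights path, init_weights only applies the random init.
    kp_model = get_cls_net(example_config)
    line_model = get_cls_net_l(example_config)
    return kp_model, line_model

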
@torch.inference_mode()
def run_inference(model, input_tensor: torch.Tensor, device):
    input_tensor = input_tensor.to(device).to(memory_format=torch.channels_last)
    output = model(input_tensor)
    return output


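# A hedged usage sketch for run_inference; the random tensor stands in for a
# preprocessed frame batch (see preprocess_batch_fast below).
def _example_run_inference(model: nn.Module) -> torch.Tensor:
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model = model.to(device).eval()
    dummy_batch = torch.randn(2, 3, 540, 960)  # NCHW, matching the pipeline
    return run_inference(model, dummy_batch, device)

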
def preprocess_batch_fast(frames):
    """Resize, normalize, and stack BGR frames into an NCHW float32 batch."""
    target_size = (540, 960)  # (height, width)
    batch = []
    for frame in frames:
        frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        img = cv2.resize(frame_rgb, (target_size[1], target_size[0]))
        img = img.astype(np.float32) / 255.0
        img = np.transpose(img, (2, 0, 1))  # HWC -> CHW
        batch.append(img)

    return torch.from_numpy(np.stack(batch))


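# A small sketch of the preprocessing contract: OpenCV-style BGR uint8 frames
# in, a normalized NCHW float32 batch at 540x960 out. The synthetic frame
# here stands in for a real decoded video frame.
def _example_preprocess():
    fake_frame = np.zeros((1080, 1920, 3), dtype=np.uint8)  # BGR, HxWxC
    batch = preprocess_batch_fast([fake_frame])
    assert batch.shape == (1, 3, 540, 960)
    return batch

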
def extract_keypoints_from_heatmap(heatmap: torch.Tensor, scale: int = 2, max_keypoints: int = 1):
    """Optimized keypoint extraction from heatmaps"""
    batch_size, n_channels, height, width = heatmap.shape

    # Suppress non-maxima: a 3x3 max pool with stride 1 keeps a value
    # unchanged only where it is a local peak.
    kernel = 3
    pad = 1
    max_pooled = F.max_pool2d(heatmap, kernel, stride=1, padding=pad)
    local_maxima = (max_pooled == heatmap)
    heatmap = heatmap * local_maxima

    scores, indices = torch.topk(heatmap.view(batch_size, n_channels, -1),
                                 max_keypoints, dim=-1, sorted=False)
    y_coords = torch.div(indices, width, rounding_mode="floor")
    x_coords = indices % width

    # Map heatmap coordinates back to input-image coordinates.
    x_coords = x_coords * scale
    y_coords = y_coords * scale

    results = torch.stack([x_coords.float(), y_coords.float(), scores], dim=-1)

    return results


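# A tiny demonstration of the max-pool trick used above: comparing each pixel
# against the pooled map suppresses everything that is not a local maximum.
def _example_peak_suppression():
    hm = torch.zeros(1, 1, 5, 5)
    hm[0, 0, 2, 2] = 1.0   # an isolated peak
    hm[0, 0, 2, 3] = 0.5   # a weaker neighbor, suppressed as a non-maximum
    peaks = extract_keypoints_from_heatmap(hm, scale=2, max_keypoints=1)
    # peaks[..., :2] are x, y in input coordinates (heatmap coords * scale).
    assert peaks[0, 0, 0].tolist() == [4.0, 4.0, 1.0]
    return peaks

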
def extract_keypoints_from_heatmap_fast(heatmap: torch.Tensor, scale: int = 2, max_keypoints: int = 1):
    """Like extract_keypoints_from_heatmap, with fewer intermediate steps."""
    batch_size, n_channels, height, width = heatmap.shape

    max_pooled = F.max_pool2d(heatmap, 3, stride=1, padding=1)
    local_maxima = (max_pooled == heatmap)

    masked_heatmap = heatmap * local_maxima
    flat_heatmap = masked_heatmap.view(batch_size, n_channels, -1)
    scores, indices = torch.topk(flat_heatmap, max_keypoints, dim=-1, sorted=False)

    y_coords = torch.div(indices, width, rounding_mode="floor") * scale
    x_coords = (indices % width) * scale

    results = torch.stack([x_coords.float(), y_coords.float(), scores], dim=-1)
    return results


def process_keypoints_vectorized(kp_coords, kp_threshold, w, h, batch_size):
    """Convert extracted keypoints into per-frame dicts of normalized coords."""
    batch_results = []

    kp_np = kp_coords.cpu().numpy()

    for batch_idx in range(batch_size):
        kp_dict = {}

        # Threshold all channels at once, then materialize only the survivors.
        valid_kps = kp_np[batch_idx, :, 0, 2] > kp_threshold
        valid_indices = np.where(valid_kps)[0]

        for ch_idx in valid_indices:
            x = float(kp_np[batch_idx, ch_idx, 0, 0]) / w
            y = float(kp_np[batch_idx, ch_idx, 0, 1]) / h
            p = float(kp_np[batch_idx, ch_idx, 0, 2])
            kp_dict[ch_idx + 1] = {'x': x, 'y': y, 'p': p}

        batch_results.append(kp_dict)

    return batch_results


def inference_batch(frames, model, kp_threshold, device, batch_size=8):
    """Run batched inference over frames and return per-frame keypoint dicts."""
    results = []
    num_frames = len(frames)

    # Inference runs on the device the model parameters already live on.
    model_device = next(model.parameters()).device

    for i in range(0, num_frames, batch_size):
        current_batch_size = min(batch_size, num_frames - i)
        batch_frames = frames[i:i + current_batch_size]

        batch = preprocess_batch_fast(batch_frames)
        b, c, h, w = batch.size()

        batch = batch.to(model_device)

        with torch.no_grad():
            heatmaps = model(batch)

        # Drop the last channel (assumed background) before peak extraction.
        kp_coords = extract_keypoints_from_heatmap_fast(heatmaps[:, :-1, :, :], scale=2, max_keypoints=1)

        # 960x540 matches the target size used in preprocess_batch_fast.
        batch_results = process_keypoints_vectorized(kp_coords, kp_threshold, 960, 540, current_batch_size)
        results.extend(batch_results)

        del heatmaps, kp_coords, batch

    return results


# Maps detected channel IDs to the keypoint IDs used downstream; channels
# absent from this map are discarded by get_mapped_keypoints.
map_keypoints = {
    1: 1, 2: 14, 3: 25, 4: 2, 5: 10, 6: 18, 7: 26, 8: 3, 9: 7, 10: 23,
    11: 27, 20: 4, 21: 8, 22: 24, 23: 28, 24: 5, 25: 13, 26: 21, 27: 29,
    28: 6, 29: 17, 30: 30, 31: 11, 32: 15, 33: 19, 34: 12, 35: 16, 36: 20,
    45: 9, 50: 31, 52: 32, 57: 22
}


def get_mapped_keypoints(kp_points):
    """Apply keypoint mapping to detection results"""
    mapped_points = {}
    for key, value in kp_points.items():
        if key in map_keypoints:
            mapped_key = map_keypoints[key]
            mapped_points[mapped_key] = value

    return mapped_points


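# A short example of the channel-to-keypoint remapping; the entries reuse the
# real map_keypoints values, the coordinates are made up.
def _example_remap():
    raw = {1: {'x': 0.5, 'y': 0.5, 'p': 0.9},   # channel 1 -> keypoint 1
           2: {'x': 0.1, 'y': 0.2, 'p': 0.8},   # channel 2 -> keypoint 14
           99: {'x': 0.0, 'y': 0.0, 'p': 0.7}}  # unmapped channels are dropped
    mapped = get_mapped_keypoints(raw)
    assert set(mapped) == {1, 14}
    return mapped

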
def process_batch_input(frames, model, kp_threshold, device, batch_size=16):
    """Process multiple input images in batch"""
    kp_results = inference_batch(frames, model, kp_threshold, device, batch_size)
    kp_results = [get_mapped_keypoints(kp) for kp in kp_results]

    return kp_results


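# An end-to-end sketch tying the pipeline together. The video path, threshold,
# and config are placeholders; get_cls_net would normally be given a real
# config and pretrained weights (see _example_build_models above).
if __name__ == '__main__':
    video_path = 'match.mp4'  # hypothetical input
    cap = cv2.VideoCapture(video_path)
    frames = []
    while len(frames) < 16:
        ok, frame = cap.read()
        if not ok:
            break
        frames.append(frame)
    cap.release()

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    kp_model, _ = _example_build_models()
    kp_model = kp_model.to(device).eval()

    results = process_batch_input(frames, kp_model, kp_threshold=0.15,
                                  device=device, batch_size=8)
    for frame_id, kps in enumerate(results):
        print(f'frame {frame_id}: {len(kps)} keypoints above threshold')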