Spaces:
Sleeping
Sleeping
Upload 10 files
Browse files- __init__.py +0 -0
- detection.py +55 -0
- detection.py.bak +49 -0
- iou_matching.py +82 -0
- kalman_filter.py +229 -0
- linear_assignment.py +192 -0
- nn_matching.py +176 -0
- preprocessing.py +73 -0
- track.py +169 -0
- tracker.py +143 -0
__init__.py
ADDED
|
File without changes
|
detection.py
ADDED
|
@@ -0,0 +1,55 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# vim: expandtab:ts=4:sw=4
|
| 2 |
+
import numpy as np
|
| 3 |
+
|
| 4 |
+
|
| 5 |
+
class Detection(object):
    """A single bounding-box detection in one image.

    Parameters
    ----------
    tlwh : array_like
        Bounding box in format `(top left x, top left y, width, height)`.
    confidence : float
        Detector confidence score.
    feature : array_like
        A feature vector that describes the object contained in this image.

    Attributes
    ----------
    tlwh : ndarray
        Bounding box in format `(top left x, top left y, width, height)`.
    confidence : float
        Detector confidence score.
    feature : ndarray
        A feature vector that describes the object contained in this image.
    """

    def __init__(self, tlwh, confidence, feature):
        # Builtin `float` (not the removed `np.float` alias) keeps this
        # compatible with NumPy >= 1.24.
        self.tlwh = np.asarray(tlwh, dtype=float)
        self.confidence = float(confidence)
        self.feature = np.asarray(feature, dtype=np.float32)

    def to_tlbr(self):
        """Convert bounding box to format `(min x, min y, max x, max y)`,
        i.e., `(top left, bottom right)`.
        """
        ret = self.tlwh.copy()
        ret[2:] += ret[:2]
        return ret

    def to_xyah(self):
        """Convert bounding box to format `(center x, center y, aspect ratio,
        height)`, where the aspect ratio is `width / height`.
        """
        ret = self.tlwh.copy()
        ret[:2] += ret[2:] / 2
        ret[2] /= ret[3]
        return ret
|
detection.py.bak
ADDED
|
@@ -0,0 +1,49 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# vim: expandtab:ts=4:sw=4
|
| 2 |
+
import numpy as np
|
| 3 |
+
|
| 4 |
+
|
| 5 |
+
class Detection(object):
    """A single bounding-box detection in one image.

    Parameters
    ----------
    tlwh : array_like
        Bounding box in format `(top left x, top left y, width, height)`.
    confidence : float
        Detector confidence score.
    feature : array_like
        A feature vector that describes the object contained in this image.

    Attributes
    ----------
    tlwh : ndarray
        Bounding box in format `(top left x, top left y, width, height)`.
    confidence : float
        Detector confidence score.
    feature : ndarray
        A feature vector that describes the object contained in this image.
    """

    def __init__(self, tlwh, confidence, feature):
        # Bug fix: `np.float` was deprecated in NumPy 1.20 and removed in
        # 1.24; constructing a Detection raised AttributeError on modern
        # NumPy. Use the builtin `float` instead (same dtype: float64).
        self.tlwh = np.asarray(tlwh, dtype=float)
        self.confidence = float(confidence)
        self.feature = np.asarray(feature, dtype=np.float32)

    def to_tlbr(self):
        """Convert bounding box to format `(min x, min y, max x, max y)`,
        i.e., `(top left, bottom right)`.
        """
        ret = self.tlwh.copy()
        ret[2:] += ret[:2]
        return ret

    def to_xyah(self):
        """Convert bounding box to format `(center x, center y, aspect ratio,
        height)`, where the aspect ratio is `width / height`.
        """
        ret = self.tlwh.copy()
        ret[:2] += ret[2:] / 2
        ret[2] /= ret[3]
        return ret
|
iou_matching.py
ADDED
|
@@ -0,0 +1,82 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# vim: expandtab:ts=4:sw=4
|
| 2 |
+
from __future__ import absolute_import
|
| 3 |
+
import numpy as np
|
| 4 |
+
from . import linear_assignment
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
def iou(bbox, candidates):
    """Compute intersection over union.

    Parameters
    ----------
    bbox : ndarray
        A bounding box in format `(top left x, top left y, width, height)`.
    candidates : ndarray
        A matrix of candidate bounding boxes (one per row) in the same
        format as `bbox`.

    Returns
    -------
    ndarray
        The intersection over union in [0, 1] between `bbox` and each
        candidate. A higher score means a larger fraction of `bbox` is
        occluded by the candidate.
    """
    box_min, box_max = bbox[:2], bbox[:2] + bbox[2:]
    cand_min = candidates[:, :2]
    cand_max = candidates[:, :2] + candidates[:, 2:]

    # Per-axis overlap window; negative extents mean no intersection.
    overlap_min = np.maximum(box_min[np.newaxis, :], cand_min)
    overlap_max = np.minimum(box_max[np.newaxis, :], cand_max)
    extents = np.maximum(0., overlap_max - overlap_min)

    area_intersection = extents.prod(axis=1)
    area_union = (
        bbox[2:].prod() + candidates[:, 2:].prod(axis=1) - area_intersection)
    return area_intersection / area_union
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
def iou_cost(tracks, detections, track_indices=None,
             detection_indices=None):
    """An intersection over union distance metric.

    Parameters
    ----------
    tracks : List[deep_sort.track.Track]
        A list of tracks.
    detections : List[deep_sort.detection.Detection]
        A list of detections.
    track_indices : Optional[List[int]]
        A list of indices to tracks that should be matched. Defaults to
        all `tracks`.
    detection_indices : Optional[List[int]]
        A list of indices to detections that should be matched. Defaults
        to all `detections`.

    Returns
    -------
    ndarray
        Returns a cost matrix of shape
        len(track_indices), len(detection_indices) where entry (i, j) is
        `1 - iou(tracks[track_indices[i]], detections[detection_indices[j]])`.
    """
    if track_indices is None:
        track_indices = np.arange(len(tracks))
    if detection_indices is None:
        detection_indices = np.arange(len(detections))

    # The candidate matrix is loop-invariant; build it once instead of
    # rebuilding it for every track row.
    candidates = np.asarray([detections[i].tlwh for i in detection_indices])

    cost_matrix = np.zeros((len(track_indices), len(detection_indices)))
    for row, track_idx in enumerate(track_indices):
        # Tracks not updated in the previous frame have no reliable box;
        # make them unmatchable under the IOU metric.
        if tracks[track_idx].time_since_update > 1:
            cost_matrix[row, :] = linear_assignment.INFTY_COST
            continue

        bbox = tracks[track_idx].to_tlwh()
        cost_matrix[row, :] = 1. - iou(bbox, candidates)
    return cost_matrix
|
kalman_filter.py
ADDED
|
@@ -0,0 +1,229 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# vim: expandtab:ts=4:sw=4
|
| 2 |
+
import numpy as np
|
| 3 |
+
import scipy.linalg
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
"""
|
| 7 |
+
Table for the 0.95 quantile of the chi-square distribution with N degrees of
|
| 8 |
+
freedom (contains values for N=1, ..., 9). Taken from MATLAB/Octave's chi2inv
|
| 9 |
+
function and used as Mahalanobis gating threshold.
|
| 10 |
+
"""
|
| 11 |
+
# 0.95 quantile of the chi-square distribution for N = 1..9 degrees of
# freedom, used as the Mahalanobis gating threshold.
chi2inv95 = {
    1: 3.8415,
    2: 5.9915,
    3: 7.8147,
    4: 9.4877,
    5: 11.070,
    6: 12.592,
    7: 14.067,
    8: 15.507,
    9: 16.919,
}
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
class KalmanFilter(object):
    """A simple Kalman filter for tracking bounding boxes in image space.

    The 8-dimensional state space

        x, y, a, h, vx, vy, va, vh

    contains the bounding box center position (x, y), aspect ratio a,
    height h, and their respective velocities.

    Object motion follows a constant velocity model. The bounding box
    location (x, y, a, h) is taken as direct observation of the state
    space (linear observation model).
    """

    def __init__(self):
        ndim, dt = 4, 1.

        # Constant-velocity transition matrix: identity plus `dt` coupling
        # each position component to its velocity.
        self._motion_mat = np.eye(2 * ndim, 2 * ndim)
        for i in range(ndim):
            self._motion_mat[i, ndim + i] = dt
        # Observation matrix: selects the four position components.
        self._update_mat = np.eye(ndim, 2 * ndim)

        # Motion and observation uncertainty are chosen relative to the
        # current state estimate. These weights control the amount of
        # uncertainty in the model. This is a bit hacky.
        self._std_weight_position = 1. / 20
        self._std_weight_velocity = 1. / 160

    def initiate(self, measurement):
        """Create track from unassociated measurement.

        Parameters
        ----------
        measurement : ndarray
            Bounding box coordinates (x, y, a, h) with center position
            (x, y), aspect ratio a, and height h.

        Returns
        -------
        (ndarray, ndarray)
            The mean vector (8 dimensional) and covariance matrix (8x8
            dimensional) of the new track. Unobserved velocities are
            initialized to 0 mean.
        """
        mean = np.r_[measurement, np.zeros_like(measurement)]

        # Standard deviations scale with the observed box height; the
        # aspect-ratio entries use small fixed values.
        height = measurement[3]
        stds = [
            2 * self._std_weight_position * height,
            2 * self._std_weight_position * height,
            1e-2,
            2 * self._std_weight_position * height,
            10 * self._std_weight_velocity * height,
            10 * self._std_weight_velocity * height,
            1e-5,
            10 * self._std_weight_velocity * height]
        covariance = np.diag(np.square(stds))
        return mean, covariance

    def predict(self, mean, covariance):
        """Run Kalman filter prediction step.

        Parameters
        ----------
        mean : ndarray
            The 8 dimensional mean vector of the object state at the
            previous time step.
        covariance : ndarray
            The 8x8 dimensional covariance matrix of the object state at
            the previous time step.

        Returns
        -------
        (ndarray, ndarray)
            The mean vector and covariance matrix of the predicted state.
        """
        height = mean[3]
        std_pos = [
            self._std_weight_position * height,
            self._std_weight_position * height,
            1e-2,
            self._std_weight_position * height]
        std_vel = [
            self._std_weight_velocity * height,
            self._std_weight_velocity * height,
            1e-5,
            self._std_weight_velocity * height]
        process_noise = np.diag(np.square(np.r_[std_pos, std_vel]))

        # x' = F x ;  P' = F P F^T + Q
        mean = np.dot(self._motion_mat, mean)
        covariance = np.linalg.multi_dot((
            self._motion_mat, covariance, self._motion_mat.T)) + process_noise

        return mean, covariance

    def project(self, mean, covariance):
        """Project state distribution to measurement space.

        Parameters
        ----------
        mean : ndarray
            The state's mean vector (8 dimensional array).
        covariance : ndarray
            The state's covariance matrix (8x8 dimensional).

        Returns
        -------
        (ndarray, ndarray)
            The projected mean and covariance matrix of the given state
            estimate.
        """
        height = mean[3]
        stds = [
            self._std_weight_position * height,
            self._std_weight_position * height,
            1e-1,
            self._std_weight_position * height]
        innovation_cov = np.diag(np.square(stds))

        projected_mean = np.dot(self._update_mat, mean)
        projected_cov = np.linalg.multi_dot((
            self._update_mat, covariance, self._update_mat.T))
        return projected_mean, projected_cov + innovation_cov

    def update(self, mean, covariance, measurement):
        """Run Kalman filter correction step.

        Parameters
        ----------
        mean : ndarray
            The predicted state's mean vector (8 dimensional).
        covariance : ndarray
            The state's covariance matrix (8x8 dimensional).
        measurement : ndarray
            The 4 dimensional measurement vector (x, y, a, h), where
            (x, y) is the center position, a the aspect ratio, and h the
            height of the bounding box.

        Returns
        -------
        (ndarray, ndarray)
            The measurement-corrected state distribution.
        """
        projected_mean, projected_cov = self.project(mean, covariance)

        # Kalman gain via Cholesky factorization; avoids explicitly
        # inverting the projected covariance.
        chol_factor, lower = scipy.linalg.cho_factor(
            projected_cov, lower=True, check_finite=False)
        kalman_gain = scipy.linalg.cho_solve(
            (chol_factor, lower), np.dot(covariance, self._update_mat.T).T,
            check_finite=False).T
        innovation = measurement - projected_mean

        new_mean = mean + np.dot(innovation, kalman_gain.T)
        new_covariance = covariance - np.linalg.multi_dot((
            kalman_gain, projected_cov, kalman_gain.T))
        return new_mean, new_covariance

    def gating_distance(self, mean, covariance, measurements,
                        only_position=False):
        """Compute gating distance between state distribution and
        measurements.

        A suitable distance threshold can be obtained from `chi2inv95`. If
        `only_position` is False, the chi-square distribution has 4
        degrees of freedom, otherwise 2.

        Parameters
        ----------
        mean : ndarray
            Mean vector over the state distribution (8 dimensional).
        covariance : ndarray
            Covariance of the state distribution (8x8 dimensional).
        measurements : ndarray
            An Nx4 dimensional matrix of N measurements, each in format
            (x, y, a, h) where (x, y) is the bounding box center position,
            a the aspect ratio, and h the height.
        only_position : Optional[bool]
            If True, distance computation is done with respect to the
            bounding box center position only.

        Returns
        -------
        ndarray
            An array of length N, where the i-th element contains the
            squared Mahalanobis distance between (mean, covariance) and
            `measurements[i]`.
        """
        mean, covariance = self.project(mean, covariance)
        if only_position:
            mean, covariance = mean[:2], covariance[:2, :2]
            measurements = measurements[:, :2]

        # Squared Mahalanobis distance via triangular solve against the
        # Cholesky factor, instead of forming the inverse covariance.
        cholesky_factor = np.linalg.cholesky(covariance)
        residuals = measurements - mean
        z = scipy.linalg.solve_triangular(
            cholesky_factor, residuals.T, lower=True, check_finite=False,
            overwrite_b=True)
        return np.sum(z * z, axis=0)
|
linear_assignment.py
ADDED
|
@@ -0,0 +1,192 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# vim: expandtab:ts=4:sw=4
|
| 2 |
+
from __future__ import absolute_import
|
| 3 |
+
import numpy as np
|
| 4 |
+
# from sklearn.utils.linear_assignment_ import linear_assignment
|
| 5 |
+
from scipy.optimize import linear_sum_assignment as linear_assignment
|
| 6 |
+
from . import kalman_filter
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
INFTY_COST = 1e+5
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
def min_cost_matching(
        distance_metric, max_distance, tracks, detections, track_indices=None,
        detection_indices=None):
    """Solve linear assignment problem.

    Parameters
    ----------
    distance_metric : Callable[List[Track], List[Detection], List[int], List[int]) -> ndarray
        The distance metric is given a list of tracks and detections as
        well as a list of N track indices and M detection indices. The
        metric should return the NxM dimensional cost matrix, where
        element (i, j) is the association cost between the i-th track in
        the given track indices and the j-th detection in the given
        detection indices.
    max_distance : float
        Gating threshold. Associations with cost larger than this value
        are disregarded.
    tracks : List[track.Track]
        A list of predicted tracks at the current time step.
    detections : List[detection.Detection]
        A list of detections at the current time step.
    track_indices : List[int]
        List of track indices that maps rows in the cost matrix to tracks
        in `tracks` (see description above).
    detection_indices : List[int]
        List of detection indices that maps columns in the cost matrix to
        detections in `detections` (see description above).

    Returns
    -------
    (List[(int, int)], List[int], List[int])
        Returns a tuple with the following three entries:
        * A list of matched track and detection indices.
        * A list of unmatched track indices.
        * A list of unmatched detection indices.
    """
    if track_indices is None:
        track_indices = np.arange(len(tracks))
    if detection_indices is None:
        detection_indices = np.arange(len(detections))

    # Nothing to match when either side is empty.
    if len(detection_indices) == 0 or len(track_indices) == 0:
        return [], track_indices, detection_indices

    cost_matrix = distance_metric(
        tracks, detections, track_indices, detection_indices)
    # Clamp over-threshold entries to just above the gate so the solver
    # can still produce a full assignment; those pairs are rejected below.
    cost_matrix[cost_matrix > max_distance] = max_distance + 1e-5

    row_indices, col_indices = linear_assignment(cost_matrix)

    matches = []
    unmatched_detections = [
        detection_indices[col] for col in range(len(detection_indices))
        if col not in col_indices]
    unmatched_tracks = [
        track_indices[row] for row in range(len(track_indices))
        if row not in row_indices]
    for row, col in zip(row_indices, col_indices):
        track_idx = track_indices[row]
        detection_idx = detection_indices[col]
        # Reject assignments the solver was forced into above the gate.
        if cost_matrix[row, col] > max_distance:
            unmatched_tracks.append(track_idx)
            unmatched_detections.append(detection_idx)
        else:
            matches.append((track_idx, detection_idx))
    return matches, unmatched_tracks, unmatched_detections
|
| 78 |
+
|
| 79 |
+
|
| 80 |
+
def matching_cascade(
        distance_metric, max_distance, cascade_depth, tracks, detections,
        track_indices=None, detection_indices=None):
    """Run matching cascade.

    Tracks are matched level by level in order of increasing age
    (`time_since_update`), so recently updated tracks get priority.

    Parameters
    ----------
    distance_metric : Callable[List[Track], List[Detection], List[int], List[int]) -> ndarray
        The distance metric is given a list of tracks and detections as
        well as a list of N track indices and M detection indices. The
        metric should return the NxM dimensional cost matrix, where
        element (i, j) is the association cost between the i-th track in
        the given track indices and the j-th detection in the given
        detection indices.
    max_distance : float
        Gating threshold. Associations with cost larger than this value
        are disregarded.
    cascade_depth : int
        The cascade depth; should be set to the maximum track age.
    tracks : List[track.Track]
        A list of predicted tracks at the current time step.
    detections : List[detection.Detection]
        A list of detections at the current time step.
    track_indices : Optional[List[int]]
        List of track indices that maps rows in the cost matrix to tracks
        in `tracks`. Defaults to all tracks.
    detection_indices : Optional[List[int]]
        List of detection indices that maps columns in the cost matrix to
        detections in `detections`. Defaults to all detections.

    Returns
    -------
    (List[(int, int)], List[int], List[int])
        Returns a tuple with the following three entries:
        * A list of matched track and detection indices.
        * A list of unmatched track indices.
        * A list of unmatched detection indices.
    """
    if track_indices is None:
        track_indices = list(range(len(tracks)))
    if detection_indices is None:
        detection_indices = list(range(len(detections)))

    matches = []
    unmatched_detections = detection_indices
    for level in range(cascade_depth):
        if not unmatched_detections:  # No detections left to assign.
            break

        # Only tracks of exactly this age compete at this level.
        tracks_at_level = [
            k for k in track_indices
            if tracks[k].time_since_update == 1 + level
        ]
        if not tracks_at_level:  # Nothing to match at this level.
            continue

        level_matches, _, unmatched_detections = min_cost_matching(
            distance_metric, max_distance, tracks, detections,
            tracks_at_level, unmatched_detections)
        matches += level_matches
    unmatched_tracks = list(set(track_indices) - set(k for k, _ in matches))
    return matches, unmatched_tracks, unmatched_detections
|
| 144 |
+
|
| 145 |
+
|
| 146 |
+
def gate_cost_matrix(
        kf, cost_matrix, tracks, detections, track_indices, detection_indices,
        gated_cost=INFTY_COST, only_position=False):
    """Invalidate infeasible entries in cost matrix based on the state
    distributions obtained by Kalman filtering.

    Parameters
    ----------
    kf : The Kalman filter.
    cost_matrix : ndarray
        The NxM dimensional cost matrix, where N is the number of track
        indices and M is the number of detection indices, such that entry
        (i, j) is the association cost between `tracks[track_indices[i]]`
        and `detections[detection_indices[j]]`.
    tracks : List[track.Track]
        A list of predicted tracks at the current time step.
    detections : List[detection.Detection]
        A list of detections at the current time step.
    track_indices : List[int]
        List of track indices that maps rows in `cost_matrix` to tracks
        in `tracks` (see description above).
    detection_indices : List[int]
        List of detection indices that maps columns in `cost_matrix` to
        detections in `detections` (see description above).
    gated_cost : Optional[float]
        Entries in the cost matrix corresponding to infeasible
        associations are set to this value. Defaults to a very large
        value.
    only_position : Optional[bool]
        If True, only the x, y position of the state distribution is
        considered during gating. Defaults to False.

    Returns
    -------
    ndarray
        Returns the modified cost matrix (also modified in place).
    """
    # 4 degrees of freedom for the full (x, y, a, h) measurement,
    # 2 when gating on position only.
    gating_dim = 2 if only_position else 4
    gating_threshold = kalman_filter.chi2inv95[gating_dim]
    measurements = np.asarray(
        [detections[i].to_xyah() for i in detection_indices])
    for row, track_idx in enumerate(track_indices):
        track = tracks[track_idx]
        distances = kf.gating_distance(
            track.mean, track.covariance, measurements, only_position)
        # Any detection outside the Mahalanobis gate is made infeasible.
        cost_matrix[row, distances > gating_threshold] = gated_cost
    return cost_matrix
|
nn_matching.py
ADDED
|
@@ -0,0 +1,176 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# vim: expandtab:ts=4:sw=4
|
| 2 |
+
import numpy as np
|
| 3 |
+
|
| 4 |
+
|
| 5 |
+
def _pdist(a, b):
|
| 6 |
+
"""Compute pair-wise squared distance between points in `a` and `b`.
|
| 7 |
+
|
| 8 |
+
Parameters
|
| 9 |
+
----------
|
| 10 |
+
a : array_like
|
| 11 |
+
An NxM matrix of N samples of dimensionality M.
|
| 12 |
+
b : array_like
|
| 13 |
+
An LxM matrix of L samples of dimensionality M.
|
| 14 |
+
|
| 15 |
+
Returns
|
| 16 |
+
-------
|
| 17 |
+
ndarray
|
| 18 |
+
Returns a matrix of size len(a), len(b) such that eleement (i, j)
|
| 19 |
+
contains the squared distance between `a[i]` and `b[j]`.
|
| 20 |
+
|
| 21 |
+
"""
|
| 22 |
+
a, b = np.asarray(a), np.asarray(b)
|
| 23 |
+
if len(a) == 0 or len(b) == 0:
|
| 24 |
+
return np.zeros((len(a), len(b)))
|
| 25 |
+
a2, b2 = np.square(a).sum(axis=1), np.square(b).sum(axis=1)
|
| 26 |
+
r2 = -2. * np.dot(a, b.T) + a2[:, None] + b2[None, :]
|
| 27 |
+
r2 = np.clip(r2, 0., float(np.inf))
|
| 28 |
+
return r2
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
def _cosine_distance(a, b, data_is_normalized=False):
|
| 32 |
+
"""Compute pair-wise cosine distance between points in `a` and `b`.
|
| 33 |
+
|
| 34 |
+
Parameters
|
| 35 |
+
----------
|
| 36 |
+
a : array_like
|
| 37 |
+
An NxM matrix of N samples of dimensionality M.
|
| 38 |
+
b : array_like
|
| 39 |
+
An LxM matrix of L samples of dimensionality M.
|
| 40 |
+
data_is_normalized : Optional[bool]
|
| 41 |
+
If True, assumes rows in a and b are unit length vectors.
|
| 42 |
+
Otherwise, a and b are explicitly normalized to lenght 1.
|
| 43 |
+
|
| 44 |
+
Returns
|
| 45 |
+
-------
|
| 46 |
+
ndarray
|
| 47 |
+
Returns a matrix of size len(a), len(b) such that eleement (i, j)
|
| 48 |
+
contains the squared distance between `a[i]` and `b[j]`.
|
| 49 |
+
|
| 50 |
+
"""
|
| 51 |
+
if not data_is_normalized:
|
| 52 |
+
a = np.asarray(a) / np.linalg.norm(a, axis=1, keepdims=True)
|
| 53 |
+
b = np.asarray(b) / np.linalg.norm(b, axis=1, keepdims=True)
|
| 54 |
+
return 1. - np.dot(a, b.T)
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
def _nn_euclidean_distance(x, y):
    """Helper function for nearest neighbor distance metric (Euclidean).

    Parameters
    ----------
    x : ndarray
        A matrix of N row-vectors (sample points).
    y : ndarray
        A matrix of M row-vectors (query points).

    Returns
    -------
    ndarray
        A vector of length M that contains for each entry in `y` the
        smallest (squared, as produced by `_pdist`) Euclidean distance
        to a sample in `x`.

    """
    pairwise = _pdist(x, y)
    # Closest sample per query column; clamp to guard against round-off.
    return np.maximum(0.0, pairwise.min(axis=0))
|
| 76 |
+
|
| 77 |
+
|
| 78 |
+
def _nn_cosine_distance(x, y):
    """Helper function for nearest neighbor distance metric (cosine).

    Parameters
    ----------
    x : ndarray
        A matrix of N row-vectors (sample points).
    y : ndarray
        A matrix of M row-vectors (query points).

    Returns
    -------
    ndarray
        A vector of length M that contains for each entry in `y` the
        smallest cosine distance to a sample in `x`.

    """
    pairwise = _cosine_distance(x, y)
    return pairwise.min(axis=0)
|
| 97 |
+
|
| 98 |
+
|
| 99 |
+
class NearestNeighborDistanceMetric(object):
    """
    A nearest neighbor distance metric that, for each target, returns
    the smallest distance to any sample that has been observed so far.

    Parameters
    ----------
    metric : str
        Either "euclidean" or "cosine".
    matching_threshold: float
        The matching threshold. Samples with larger distance are
        considered an invalid match.
    budget : Optional[int]
        If not None, keep at most this many samples per class; the
        oldest samples are dropped once the budget is exceeded.

    Attributes
    ----------
    samples : Dict[int -> List[ndarray]]
        A dictionary mapping target identities to the list of samples
        that have been observed so far.

    """

    def __init__(self, metric, matching_threshold, budget=None):
        if metric == "euclidean":
            self._metric = _nn_euclidean_distance
        elif metric == "cosine":
            self._metric = _nn_cosine_distance
        else:
            raise ValueError(
                "Invalid metric; must be either 'euclidean' or 'cosine'")
        self.matching_threshold = matching_threshold
        self.budget = budget
        self.samples = {}

    def partial_fit(self, features, targets, active_targets):
        """Update the distance metric with new data.

        Parameters
        ----------
        features : ndarray
            An NxM matrix of N features of dimensionality M.
        targets : ndarray
            An integer array of associated target identities.
        active_targets : List[int]
            A list of targets that are currently present in the scene.

        """
        for feat, tgt in zip(features, targets):
            bucket = self.samples.setdefault(tgt, [])
            bucket.append(feat)
            if self.budget is not None:
                # Enforce the per-target budget by keeping newest samples.
                self.samples[tgt] = bucket[-self.budget:]
        # Drop samples of targets no longer present in the scene.
        self.samples = {tgt: self.samples[tgt] for tgt in active_targets}

    def distance(self, features, targets):
        """Compute distance between features and targets.

        Parameters
        ----------
        features : ndarray
            An NxM matrix of N features of dimensionality M.
        targets : List[int]
            A list of targets to match the given `features` against.

        Returns
        -------
        ndarray
            A cost matrix of shape len(targets), len(features), where
            element (i, j) contains the closest distance between
            `targets[i]` and `features[j]`.

        """
        cost_matrix = np.zeros((len(targets), len(features)))
        for row, tgt in enumerate(targets):
            cost_matrix[row, :] = self._metric(self.samples[tgt], features)
        return cost_matrix
|
preprocessing.py
ADDED
|
@@ -0,0 +1,73 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# vim: expandtab:ts=4:sw=4
|
| 2 |
+
import numpy as np
|
| 3 |
+
import cv2
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
def non_max_suppression(boxes, max_bbox_overlap, scores=None):
    """Suppress overlapping detections.

    Original code from [1]_ has been adapted to include confidence score.

    .. [1] http://www.pyimagesearch.com/2015/02/16/
           faster-non-maximum-suppression-python/

    Examples
    --------

        >>> boxes = [d.roi for d in detections]
        >>> scores = [d.confidence for d in detections]
        >>> indices = non_max_suppression(boxes, max_bbox_overlap, scores)
        >>> detections = [detections[i] for i in indices]

    Parameters
    ----------
    boxes : ndarray
        Array of ROIs (x, y, width, height).
    max_bbox_overlap : float
        ROIs that overlap more than this values are suppressed.
    scores : Optional[array_like]
        Detector confidence score.

    Returns
    -------
    List[int]
        Returns indices of detections that have survived non-maxima suppression.

    """
    if len(boxes) == 0:
        return []

    # Fix: `np.float` was removed in NumPy 1.24 (use the builtin `float`),
    # consistent with the same fix already applied in detection.py.
    # `asarray` also accepts plain lists, which `ndarray.astype` did not.
    boxes = np.asarray(boxes, dtype=float)
    pick = []

    # Box corners in (x1, y1, x2, y2) form.
    x1 = boxes[:, 0]
    y1 = boxes[:, 1]
    x2 = boxes[:, 2] + boxes[:, 0]
    y2 = boxes[:, 3] + boxes[:, 1]

    area = (x2 - x1 + 1) * (y2 - y1 + 1)
    # Process candidates in order of increasing score; without scores,
    # fall back to the bottom-right y-coordinate (original heuristic).
    if scores is not None:
        idxs = np.argsort(scores)
    else:
        idxs = np.argsort(y2)

    while len(idxs) > 0:
        # Pick the best-ranked remaining box (end of the sorted list).
        last = len(idxs) - 1
        i = idxs[last]
        pick.append(i)

        # Intersection of the picked box with every remaining candidate.
        xx1 = np.maximum(x1[i], x1[idxs[:last]])
        yy1 = np.maximum(y1[i], y1[idxs[:last]])
        xx2 = np.minimum(x2[i], x2[idxs[:last]])
        yy2 = np.minimum(y2[i], y2[idxs[:last]])

        w = np.maximum(0, xx2 - xx1 + 1)
        h = np.maximum(0, yy2 - yy1 + 1)

        # Fraction of each candidate's area covered by the intersection.
        overlap = (w * h) / area[idxs[:last]]

        # Remove the picked box and any candidate overlapping it too much.
        idxs = np.delete(
            idxs, np.concatenate(
                ([last], np.where(overlap > max_bbox_overlap)[0])))

    return pick
|
track.py
ADDED
|
@@ -0,0 +1,169 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# vim: expandtab:ts=4:sw=4
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
class TrackState:
    """
    Enumeration type for the single target track state. Newly created tracks are
    classified as `tentative` until enough evidence has been collected. Then,
    the track state is changed to `confirmed`. Tracks that are no longer alive
    are classified as `deleted` to mark them for removal from the set of active
    tracks.

    """

    # Newly created track, not yet backed by enough consecutive detections.
    Tentative = 1
    # Track has accumulated enough hits to be considered a real target.
    Confirmed = 2
    # Track is dead and will be removed from the active track set.
    Deleted = 3
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
class Track:
    """
    A single target track with state space `(x, y, a, h)` and associated
    velocities, where `(x, y)` is the center of the bounding box, `a` is the
    aspect ratio and `h` is the height.

    Parameters
    ----------
    mean : ndarray
        Mean vector of the initial state distribution.
    covariance : ndarray
        Covariance matrix of the initial state distribution.
    track_id : int
        A unique track identifier.
    n_init : int
        Number of consecutive detections before the track is confirmed. The
        track state is set to `Deleted` if a miss occurs within the first
        `n_init` frames.
    max_age : int
        The maximum number of consecutive misses before the track state is
        set to `Deleted`.
    feature : Optional[ndarray]
        Feature vector of the detection this track originates from. If not
        None, this feature is added to the `features` cache.

    Attributes
    ----------
    mean : ndarray
        Mean vector of the initial state distribution.
    covariance : ndarray
        Covariance matrix of the initial state distribution.
    track_id : int
        A unique track identifier.
    hits : int
        Total number of measurement updates.
    age : int
        Total number of frames since first occurrence.
    time_since_update : int
        Total number of frames since last measurement update.
    state : TrackState
        The current track state.
    features : List[ndarray]
        A cache of features. On each measurement update, the associated
        feature vector is added to this list.

    """

    def __init__(self, mean, covariance, track_id, n_init, max_age,
                 feature=None):
        self.mean = mean
        self.covariance = covariance
        self.track_id = track_id
        self.hits = 1
        self.age = 1
        self.time_since_update = 0

        self.state = TrackState.Tentative
        # Seed the appearance cache with the originating detection, if any.
        self.features = [] if feature is None else [feature]

        self._n_init = n_init
        self._max_age = max_age

    def to_tlwh(self):
        """Get current position in bounding box format `(top left x, top left y,
        width, height)`.

        Returns
        -------
        ndarray
            The bounding box.

        """
        box = self.mean[:4].copy()
        # Recover width from aspect ratio: w = a * h.
        box[2] *= box[3]
        # Shift the center to the top-left corner.
        box[:2] -= box[2:] / 2
        return box

    def to_tlbr(self):
        """Get current position in bounding box format `(min x, min y, max x,
        max y)`.

        Returns
        -------
        ndarray
            The bounding box.

        """
        box = self.to_tlwh()
        box[2:] += box[:2]
        return box

    def increment_age(self):
        # One more frame has passed without a measurement being applied yet.
        self.age += 1
        self.time_since_update += 1

    def predict(self, kf):
        """Propagate the state distribution to the current time step using a
        Kalman filter prediction step.

        Parameters
        ----------
        kf : kalman_filter.KalmanFilter
            The Kalman filter.

        """
        self.mean, self.covariance = kf.predict(self.mean, self.covariance)
        self.increment_age()

    def update(self, kf, detection):
        """Perform Kalman filter measurement update step and update the
        feature cache.

        Parameters
        ----------
        kf : kalman_filter.KalmanFilter
            The Kalman filter.
        detection : Detection
            The associated detection.

        """
        self.mean, self.covariance = kf.update(
            self.mean, self.covariance, detection.to_xyah())
        self.features.append(detection.feature)

        self.hits += 1
        self.time_since_update = 0
        # Promote a tentative track once it has enough consecutive hits.
        if self.state == TrackState.Tentative and self.hits >= self._n_init:
            self.state = TrackState.Confirmed

    def mark_missed(self):
        """Mark this track as missed (no association at the current time step).
        """
        # A miss kills a tentative track immediately; a confirmed track dies
        # only after going unmatched for more than `_max_age` frames.
        missed_too_long = self.time_since_update > self._max_age
        if self.state == TrackState.Tentative or missed_too_long:
            self.state = TrackState.Deleted

    def is_tentative(self):
        """Returns True if this track is tentative (unconfirmed)."""
        return self.state == TrackState.Tentative

    def is_confirmed(self):
        """Returns True if this track is confirmed."""
        return self.state == TrackState.Confirmed

    def is_deleted(self):
        """Returns True if this track is dead and should be deleted."""
        return self.state == TrackState.Deleted
|
tracker.py
ADDED
|
@@ -0,0 +1,143 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# vim: expandtab:ts=4:sw=4
|
| 2 |
+
from __future__ import absolute_import
|
| 3 |
+
import numpy as np
|
| 4 |
+
from . import kalman_filter
|
| 5 |
+
from . import linear_assignment
|
| 6 |
+
from . import iou_matching
|
| 7 |
+
from .track import Track
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
class Tracker:
    """
    This is the multi-target tracker.

    Parameters
    ----------
    metric : nn_matching.NearestNeighborDistanceMetric
        A distance metric for measurement-to-track association.
    max_age : int
        Maximum number of missed misses before a track is deleted.
    n_init : int
        Number of consecutive detections before the track is confirmed. The
        track state is set to `Deleted` if a miss occurs within the first
        `n_init` frames.

    Attributes
    ----------
    metric : nn_matching.NearestNeighborDistanceMetric
        The distance metric used for measurement to track association.
    max_age : int
        Maximum number of missed misses before a track is deleted.
    n_init : int
        Number of frames that a track remains in initialization phase.
    kf : kalman_filter.KalmanFilter
        A Kalman filter to filter target trajectories in image space.
    tracks : List[Track]
        The list of active tracks at the current time step.

    """

    def __init__(self, metric, max_iou_distance=0.7, max_age=70, n_init=3):
        self.metric = metric
        self.max_iou_distance = max_iou_distance
        self.max_age = max_age
        self.n_init = n_init

        self.kf = kalman_filter.KalmanFilter()
        self.tracks = []
        self._next_id = 1

    def predict(self):
        """Propagate track state distributions one time step forward.

        This function should be called once every time step, before `update`.
        """
        for trk in self.tracks:
            trk.predict(self.kf)

    def increment_ages(self):
        # Age every track and register a miss (used when a frame has no
        # detections to associate).
        for trk in self.tracks:
            trk.increment_age()
            trk.mark_missed()

    def update(self, detections):
        """Perform measurement update and track management.

        Parameters
        ----------
        detections : List[deep_sort.detection.Detection]
            A list of detections at the current time step.

        """
        # Run matching cascade.
        matches, unmatched_tracks, unmatched_detections = \
            self._match(detections)

        # Update track set: apply measurements, register misses, and spawn
        # new tracks for detections that matched nothing.
        for track_idx, detection_idx in matches:
            self.tracks[track_idx].update(self.kf, detections[detection_idx])
        for track_idx in unmatched_tracks:
            self.tracks[track_idx].mark_missed()
        for detection_idx in unmatched_detections:
            self._initiate_track(detections[detection_idx])
        self.tracks = [trk for trk in self.tracks if not trk.is_deleted()]

        # Update distance metric with features of confirmed tracks.
        active_targets = [trk.track_id for trk in self.tracks
                          if trk.is_confirmed()]
        features, targets = [], []
        for trk in self.tracks:
            if not trk.is_confirmed():
                continue
            features += trk.features
            targets += [trk.track_id] * len(trk.features)
            # Features have been handed to the metric; clear the cache.
            trk.features = []
        self.metric.partial_fit(
            np.asarray(features), np.asarray(targets), active_targets)

    def _match(self, detections):

        def gated_metric(tracks, dets, track_indices, detection_indices):
            # Appearance cost between detections and tracks, gated in
            # state space via the Kalman filter.
            features = np.array([dets[i].feature for i in detection_indices])
            targets = np.array([tracks[i].track_id for i in track_indices])
            cost_matrix = self.metric.distance(features, targets)
            return linear_assignment.gate_cost_matrix(
                self.kf, cost_matrix, tracks, dets, track_indices,
                detection_indices)

        # Split track set into confirmed and unconfirmed tracks.
        confirmed = [
            i for i, trk in enumerate(self.tracks) if trk.is_confirmed()]
        unconfirmed = [
            i for i, trk in enumerate(self.tracks) if not trk.is_confirmed()]

        # Stage 1: associate confirmed tracks using appearance features.
        matches_a, unmatched_tracks_a, unmatched_detections = \
            linear_assignment.matching_cascade(
                gated_metric, self.metric.matching_threshold, self.max_age,
                self.tracks, detections, confirmed)

        # Stage 2: associate recently-seen leftover tracks together with
        # unconfirmed tracks using IOU.
        iou_candidates = unconfirmed + [
            k for k in unmatched_tracks_a
            if self.tracks[k].time_since_update == 1]
        unmatched_tracks_a = [
            k for k in unmatched_tracks_a
            if self.tracks[k].time_since_update != 1]
        matches_b, unmatched_tracks_b, unmatched_detections = \
            linear_assignment.min_cost_matching(
                iou_matching.iou_cost, self.max_iou_distance, self.tracks,
                detections, iou_candidates, unmatched_detections)

        matches = matches_a + matches_b
        unmatched_tracks = list(
            set(unmatched_tracks_a) | set(unmatched_tracks_b))
        return matches, unmatched_tracks, unmatched_detections

    def _initiate_track(self, detection):
        # Start a new track from an unmatched detection.
        mean, covariance = self.kf.initiate(detection.to_xyah())
        self.tracks.append(Track(
            mean, covariance, self._next_id, self.n_init, self.max_age,
            detection.feature))
        self._next_id += 1
|