Upload folder using huggingface_hub
This view is limited to 50 files because it contains too many changes. See raw diff
- .gitattributes +44 -35
- .gitignore +5 -0
- 20251029-detection.pt +3 -0
- 20251029-keypoint.pt +3 -0
- README.md +132 -0
- SV_kp.engine +3 -0
- __pycache__/keypoint_evaluation.cpython-312.pyc +0 -0
- __pycache__/keypoint_helper.cpython-312.pyc +0 -0
- __pycache__/keypoint_helper_v2.cpython-312.pyc +3 -0
- __pycache__/keypoint_helper_v2.cpython-312.pyc.2609775282608 +0 -0
- __pycache__/keypoint_helper_v2_optimized.cpython-312.pyc +3 -0
- __pycache__/keypoint_helper_v2_optimized.cpython-312.pyc.1837368399824 +3 -0
- __pycache__/keypoint_helper_v2_optimized.cpython-312.pyc.2364780042192 +3 -0
- __pycache__/keypoint_helper_v2_optimized.cpython-312.pyc.2618992613328 +3 -0
- __pycache__/miner.cpython-312.pyc +0 -0
- __pycache__/miner.cpython-312.pyc.2050184619568 +0 -0
- __pycache__/miner.cpython-312.pyc.2701627401776 +0 -0
- __pycache__/miner1.cpython-312.pyc +0 -0
- __pycache__/miner2.cpython-312.pyc +0 -0
- __pycache__/miner3.cpython-312.pyc +0 -0
- __pycache__/pitch.cpython-312.pyc +0 -0
- __pycache__/test_predict_batch.cpython-312.pyc +0 -0
- best.engine +3 -0
- best.onnx +3 -0
- best.pt +3 -0
- config.yml +24 -0
- detection.onnx +3 -0
- detection.pt +3 -0
- evaluate_from_url.py +286 -0
- football_object_detection.pt +3 -0
- football_pitch_template.png +0 -0
- hrnetv2_w48.yaml +35 -0
- inspect_yolo_model.py +155 -0
- keypoint +3 -0
- keypoint.pt +3 -0
- keypoint_evaluation.py +956 -0
- keypoint_helper.py +115 -0
- keypoint_helper_v2.py +0 -0
- keypoint_helper_v2_optimized.py +0 -0
- miner.py +881 -0
- miner1.py +685 -0
- miner2.py +953 -0
- miner3.py +952 -0
- object-detection.onnx +3 -0
- osnet_ain.pyc +0 -0
- osnet_model.pth.tar-100 +3 -0
- pitch.py +687 -0
- player.pt +3 -0
- player.py +389 -0
- team_cluster.pyc +0 -0
.gitattributes
CHANGED
@@ -1,35 +1,44 @@
-*.7z filter=lfs diff=lfs merge=lfs -text
-*.arrow filter=lfs diff=lfs merge=lfs -text
-*.bin filter=lfs diff=lfs merge=lfs -text
-*.bz2 filter=lfs diff=lfs merge=lfs -text
-*.ckpt filter=lfs diff=lfs merge=lfs -text
-*.ftz filter=lfs diff=lfs merge=lfs -text
-*.gz filter=lfs diff=lfs merge=lfs -text
-*.h5 filter=lfs diff=lfs merge=lfs -text
-*.joblib filter=lfs diff=lfs merge=lfs -text
-*.lfs.* filter=lfs diff=lfs merge=lfs -text
-*.mlmodel filter=lfs diff=lfs merge=lfs -text
-*.model filter=lfs diff=lfs merge=lfs -text
-*.msgpack filter=lfs diff=lfs merge=lfs -text
-*.npy filter=lfs diff=lfs merge=lfs -text
-*.npz filter=lfs diff=lfs merge=lfs -text
-*.onnx filter=lfs diff=lfs merge=lfs -text
-*.ot filter=lfs diff=lfs merge=lfs -text
-*.parquet filter=lfs diff=lfs merge=lfs -text
-*.pb filter=lfs diff=lfs merge=lfs -text
-*.pickle filter=lfs diff=lfs merge=lfs -text
-*.pkl filter=lfs diff=lfs merge=lfs -text
-*.pt filter=lfs diff=lfs merge=lfs -text
-*.pth filter=lfs diff=lfs merge=lfs -text
-*.rar filter=lfs diff=lfs merge=lfs -text
-*.safetensors filter=lfs diff=lfs merge=lfs -text
-saved_model/**/* filter=lfs diff=lfs merge=lfs -text
-*.tar.* filter=lfs diff=lfs merge=lfs -text
-*.tar filter=lfs diff=lfs merge=lfs -text
-*.tflite filter=lfs diff=lfs merge=lfs -text
-*.tgz filter=lfs diff=lfs merge=lfs -text
-*.wasm filter=lfs diff=lfs merge=lfs -text
-*.xz filter=lfs diff=lfs merge=lfs -text
-*.zip filter=lfs diff=lfs merge=lfs -text
-*.zst filter=lfs diff=lfs merge=lfs -text
-*tfevents* filter=lfs diff=lfs merge=lfs -text
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tar filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
+SV_kp.engine filter=lfs diff=lfs merge=lfs -text
+osnet_model.pth.tar-100 filter=lfs diff=lfs merge=lfs -text
+__pycache__/keypoint_helper_v2.cpython-312.pyc filter=lfs diff=lfs merge=lfs -text
+__pycache__/keypoint_helper_v2_optimized.cpython-312.pyc filter=lfs diff=lfs merge=lfs -text
+__pycache__/keypoint_helper_v2_optimized.cpython-312.pyc.1837368399824 filter=lfs diff=lfs merge=lfs -text
+__pycache__/keypoint_helper_v2_optimized.cpython-312.pyc.2364780042192 filter=lfs diff=lfs merge=lfs -text
+__pycache__/keypoint_helper_v2_optimized.cpython-312.pyc.2618992613328 filter=lfs diff=lfs merge=lfs -text
+best.engine filter=lfs diff=lfs merge=lfs -text
+keypoint filter=lfs diff=lfs merge=lfs -text
.gitignore
ADDED
@@ -0,0 +1,5 @@
venv
outputs
outputs-keypoints
outputs-detections
*.mp4
20251029-detection.pt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:8bbacfcb38e38b1b8816788e9e6e845160533719a0b87b693d58b932380d0d28
size 152961687
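Each of the model artifacts in this commit is stored as a Git LFS pointer like the one above: the repository itself only records the object's SHA-256 (`oid`) and its byte `size`. After pulling the real files, the pointer fields can be checked against the downloaded blob. A minimal sketch, assuming nothing beyond the pointer format shown above (the pointer text and file path passed in are placeholders):

```python
import hashlib
from pathlib import Path


def verify_lfs_object(pointer_text: str, file_path: Path) -> bool:
    """Compare a downloaded file against the oid/size recorded in its LFS pointer."""
    # Pointer lines look like: "version ...", "oid sha256:<hex>", "size <bytes>"
    fields = dict(line.split(" ", 1) for line in pointer_text.strip().splitlines())
    expected_oid = fields["oid"].split(":", 1)[1]
    expected_size = int(fields["size"])

    digest = hashlib.sha256()
    with open(file_path, "rb") as fh:
        for chunk in iter(lambda: fh.read(1 << 20), b""):  # hash in 1 MiB chunks
            digest.update(chunk)

    return digest.hexdigest() == expected_oid and file_path.stat().st_size == expected_size
```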
20251029-keypoint.pt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:6dd10dba85895c92760cdb5a99c5cfca899c68f361a66c5448f38a187280ee1f
size 6849672
README.md
ADDED
@@ -0,0 +1,132 @@
🚀 Example Chute for Turbovision 🪂

This repository demonstrates how to deploy a Chute via the Turbovision CLI, hosted on Hugging Face Hub. It serves as a minimal example showcasing the required structure and workflow for integrating machine learning models, preprocessing, and orchestration into a reproducible Chute environment.

## Repository Structure

The following two files must be present (in their current locations) for a successful deployment — their content can be modified as needed:

| File | Purpose |
|------|---------|
| `miner.py` | Defines the ML model type(s), orchestration, and all pre/postprocessing logic. |
| `config.yml` | Specifies machine configuration (e.g., GPU type, memory, environment variables). |

Other files — e.g., model weights, utility scripts, or dependencies — are optional and can be included as needed for your model.

> **Note**: Any required assets must be defined or contained within this repo, which is fully open-source, since all network-related operations (downloading challenge data, weights, etc.) are disabled inside the Chute.

## Overview

Below is a high-level diagram showing the interaction between Hugging Face, Chutes and Turbovision:

```
┌─────────────┐      ┌──────────┐      ┌──────────────┐
│ HuggingFace │ ───> │  Chutes  │ ───> │ Turbovision  │
│     Hub     │      │   .ai    │      │  Validator   │
└─────────────┘      └──────────┘      └──────────────┘
```

## Local Testing

After editing `config.yml` and `miner.py` and saving them to your Hugging Face repo, you will want to test that everything works locally.

1. **Copy the template file** `scorevision/chute_template/turbovision_chute.py.j2` to a Python file called `my_chute.py` and fill in the missing variables:

```python
HF_REPO_NAME = "{{ huggingface_repository_name }}"
HF_REPO_REVISION = "{{ huggingface_repository_revision }}"
CHUTES_USERNAME = "{{ chute_username }}"
CHUTE_NAME = "{{ chute_name }}"
```

2. **Run the following command to build the chute locally** (Caution: there are known issues with the Docker location when running this on a Mac):

```bash
chutes build my_chute:chute --local --public
```

3. **Run the Docker image that was just built** (its name is the `CHUTE_NAME`) and enter the container:

```bash
docker run -p 8000:8000 -e CHUTES_EXECUTION_CONTEXT=REMOTE -it <image-name> /bin/bash
```

4. **Run the file from within the container**:

```bash
chutes run my_chute:chute --dev --debug
```

5. **In another terminal, test the local endpoints** to ensure there are no bugs:

```bash
# Health check
curl -X POST http://localhost:8000/health -d '{}'

# Prediction test
curl -X POST http://localhost:8000/predict -d '{"url": "https://scoredata.me/2025_03_14/35ae7a/h1_0f2ca0.mp4","meta": {}}'
```

## Live Testing

If you have a chute with the same name (i.e. from a previous deployment), ensure you delete it first (or you will get an error when trying to build).

1. **List existing chutes**:

```bash
chutes chutes list
```

Take note of the chute id that you wish to delete (if any):

```bash
chutes chutes delete <chute-id>
```

2. **You should also delete its associated image**:

```bash
chutes images list
```

Take note of the chute image id:

```bash
chutes images delete <chute-image-id>
```

3. **Use Turbovision's CLI to build, deploy and commit on-chain**:

```bash
sv -vv push
```

> **Note**: You can skip the on-chain commit using `--no-commit`. You can also specify a past Hugging Face revision to point to using `--revision` and/or the local files you want to upload to your Hugging Face repo using `--model-path`.

4. **When completed, warm up the chute** (if it's cold 🧊):

You can confirm its status using `chutes chutes list`, or `chutes chutes get <chute-id>` if you already know its id.

> **Note**: Warming up can sometimes take a while, but if the chute runs without errors (it should, if you've tested locally first) and there are sufficient nodes (i.e. machines) available matching the `config.yml` you specified, the chute should become hot 🔥!

```bash
chutes warmup <chute-id>
```

5. **Test the chute's endpoints**:

```bash
# Health check
curl -X POST https://<YOUR-CHUTE-SLUG>.chutes.ai/health -d '{}' -H "Authorization: Bearer $CHUTES_API_KEY"

# Prediction
curl -X POST https://<YOUR-CHUTE-SLUG>.chutes.ai/predict -d '{"url": "https://scoredata.me/2025_03_14/35ae7a/h1_0f2ca0.mp4","meta": {}}' -H "Authorization: Bearer $CHUTES_API_KEY"
```

6. **Test what your chute would score on a validator**:

This also applies any validation/integrity checks, which may fail if you did not use the Turbovision CLI above to deploy the chute:

```bash
sv -vv run-once
```
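The health and prediction checks above can also be driven from Python instead of curl. The snippet below is only a sketch of the same two requests: it assumes the chute is reachable at whichever base URL you choose (local or deployed) and that `CHUTES_API_KEY` is exported for the deployed case; the shape of the /predict response is whatever `miner.py` returns and is not assumed here.

```python
import json
import os
import urllib.request

BASE_URL = "http://localhost:8000"  # or "https://<YOUR-CHUTE-SLUG>.chutes.ai" once deployed
API_KEY = os.environ.get("CHUTES_API_KEY")  # only required for the deployed endpoint


def post(path: str, payload: dict) -> dict:
    """POST a JSON payload to the chute and return the decoded JSON response."""
    req = urllib.request.Request(
        BASE_URL + path,
        data=json.dumps(payload).encode("utf-8"),
        headers={"Content-Type": "application/json"},
        method="POST",
    )
    if API_KEY:
        req.add_header("Authorization", f"Bearer {API_KEY}")
    with urllib.request.urlopen(req) as response:
        return json.loads(response.read().decode("utf-8"))


print(post("/health", {}))
print(post("/predict", {"url": "https://scoredata.me/2025_03_14/35ae7a/h1_0f2ca0.mp4", "meta": {}}))
```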
SV_kp.engine
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:f99452eb79e064189e2758abd20a78845a5b639fc8b9c4bc650519c83e13e8db
size 368289641
__pycache__/keypoint_evaluation.cpython-312.pyc
ADDED
Binary file (36 kB).
__pycache__/keypoint_helper.cpython-312.pyc
ADDED
Binary file (4.74 kB).
__pycache__/keypoint_helper_v2.cpython-312.pyc
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:c6c301a3602090dab908225fea736b9f6211ecd4c1733cfda40dd3e274e67ed7
size 119899
__pycache__/keypoint_helper_v2.cpython-312.pyc.2609775282608
ADDED
Binary file (98.2 kB).
__pycache__/keypoint_helper_v2_optimized.cpython-312.pyc
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:d60d623ae2f0ce1ba3cfc4e42058914cb6acccaaf082ec098556a55c69ec99a2
size 135087
__pycache__/keypoint_helper_v2_optimized.cpython-312.pyc.1837368399824
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:07b377a3473645ff304d0070a9fc71891639557d2f2b4f19ffb0fc108bdc2666
size 134432
__pycache__/keypoint_helper_v2_optimized.cpython-312.pyc.2364780042192
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:34253c900b87954bd1f34881e33d9d8cf2fba247b4a65f17cd21673ba837d94d
size 133125
__pycache__/keypoint_helper_v2_optimized.cpython-312.pyc.2618992613328
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:86d3822ba6714e8dd6300f6d6e034c5c69191dca702caa4837326978c503fa0e
size 133215
__pycache__/miner.cpython-312.pyc
ADDED
Binary file (33.1 kB).
__pycache__/miner.cpython-312.pyc.2050184619568
ADDED
Binary file (29.7 kB).
__pycache__/miner.cpython-312.pyc.2701627401776
ADDED
Binary file (32.8 kB).
__pycache__/miner1.cpython-312.pyc
ADDED
Binary file (23.6 kB).
__pycache__/miner2.cpython-312.pyc
ADDED
Binary file (34 kB).
__pycache__/miner3.cpython-312.pyc
ADDED
Binary file (37 kB).
__pycache__/pitch.cpython-312.pyc
ADDED
Binary file (31.2 kB).
__pycache__/test_predict_batch.cpython-312.pyc
ADDED
Binary file (29.5 kB).
best.engine
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:5d93cf7017bf7190a24f29e48548493aaf8ebd8f96a8257ebb8a0f42bd266e7b
size 9167745
best.onnx
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:4f45602c5c3f13822c4bdf35d06b505dc4a47c94a14ed60943ccc61c6992433f
size 5908859
best.pt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:ce387539acef635b248dc99b1e34e24993de604db59aa5dfd3c6f8c696cac003
size 5433178
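For reference, the pointer above records `best.pt` at 5,433,178 bytes (about 5.2 MB), which the approximate file-size heuristic in `inspect_yolo_model.py` later in this commit would place in its "Likely Nano (n) - file < 6MB" bucket; that script itself notes both its size- and parameter-based estimates are rough. The arithmetic:

```python
size_bytes = 5_433_178                # "size" field from the LFS pointer above
size_mb = size_bytes / (1024 * 1024)  # ≈ 5.18 MB, i.e. below the 6 MB "nano" cut-off
print(f"best.pt: {size_mb:.2f} MB")
```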
config.yml
ADDED
@@ -0,0 +1,24 @@
Image:
  from_base: parachutes/python:3.12
  run_command:
    - pip install --upgrade setuptools wheel
    - pip install ultralytics==8.3.222 opencv-python-headless numpy pydantic
    - pip install scikit-learn
    - pip install onnxruntime-gpu
  set_workdir: /app

NodeSelector:
  gpu_count: 1
  min_vram_gb_per_gpu: 16
  exclude:
    - "5090"
    - b200
    - h200
    - mi300x

Chute:
  timeout_seconds: 900
  concurrency: 4
  max_instances: 5
  scaling_threshold: 0.5
  shutdown_after_seconds: 3600
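A mistake in `config.yml` typically only surfaces once `sv push` tries to deploy, so a quick local parse can catch missing fields earlier. A minimal sketch, assuming only the three top-level sections this particular file uses (this is not an official Chutes schema):

```python
import yaml  # pip install pyyaml

with open("config.yml") as fh:
    config = yaml.safe_load(fh)

# Sections used by this repository's config.
for section in ("Image", "NodeSelector", "Chute"):
    if section not in config:
        raise SystemExit(f"config.yml is missing the '{section}' section")

print("base image:           ", config["Image"]["from_base"])
print("min VRAM per GPU (GB):", config["NodeSelector"]["min_vram_gb_per_gpu"])
print("timeout (seconds):    ", config["Chute"]["timeout_seconds"])
```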
detection.onnx
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:7b51470cb703f5a9a789df38674b67d4bbe7f8f31846d69dbc97ce484f790cf9
size 10245169
detection.pt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:2ad3e89b658d2626c34174f6799d240ffd37cfe45752c0ce6ef73b05935042e0
size 52014742
evaluate_from_url.py
ADDED
@@ -0,0 +1,286 @@
import argparse
import json
import tempfile
from pathlib import Path
from typing import List, Tuple, Dict
import urllib.request
import urllib.parse
import urllib.error

import cv2
import numpy as np

from miner1 import TVFrameResult, BoundingBox
from keypoint_evaluation import (
    load_template_from_file,
)
from test_predict_batch import (
    evaluate_keypoints_batch,
    visualize_keypoint_evaluation,
)


def fetch_json_data(url: str) -> dict:
    """Fetch JSON data from URL."""
    print(f"Fetching data from {url}...")

    # Create a request with headers to avoid 403 errors
    req = urllib.request.Request(url)
    req.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36')
    req.add_header('Accept', 'application/json, text/plain, */*')
    req.add_header('Accept-Language', 'en-US,en;q=0.9')

    try:
        with urllib.request.urlopen(req) as response:
            data = json.loads(response.read().decode('utf-8'))
            predictions = data.get('predictions', {})
            frames_list = predictions.get('frames', [])
            print(f"Successfully fetched data with {len(frames_list)} frames")
            return data
    except urllib.error.HTTPError as e:
        print(f"HTTP Error {e.code}: {e.reason}")
        if e.code == 403:
            print("403 Forbidden: The server is blocking the request. This might require authentication or different headers.")
        raise
    except urllib.error.URLError as e:
        print(f"URL Error: {e.reason}")
        raise


def download_video(video_url: str, output_path: Path) -> Path:
    """Download video from URL to local file."""
    print(f"Downloading video from {video_url}...")
    output_path.parent.mkdir(parents=True, exist_ok=True)
    urllib.request.urlretrieve(video_url, str(output_path))
    print(f"Video downloaded to {output_path}")
    return output_path


def extract_frames_from_video(video_path: Path, frame_ids: List[int] = None) -> Dict[int, np.ndarray]:
    """Extract frames from video, optionally only specific frame IDs."""
    print(f"Extracting frames from {video_path}...")
    cap = cv2.VideoCapture(str(video_path))
    if not cap.isOpened():
        raise RuntimeError(f"Unable to open video: {video_path}")

    frames = {}
    frame_count = 0

    while True:
        ret, frame = cap.read()
        if not ret:
            break

        if frame_ids is None or frame_count in frame_ids:
            frames[frame_count] = frame

        frame_count += 1

    cap.release()
    print(f"Extracted {len(frames)} frames from video")
    return frames


def convert_keypoints_format(json_keypoints: List[List[int]]) -> List[Tuple[int, int]]:
    """Convert keypoints from JSON format [[x,y], [x,y], ...] to List[Tuple[int, int]]."""
    return [(int(kp[0]), int(kp[1])) for kp in json_keypoints]


def convert_json_to_tvframe_results(
    json_data: dict,
    frames: Dict[int, np.ndarray],
) -> List[TVFrameResult]:
    """
    Convert JSON data to TVFrameResult objects.

    Args:
        json_data: JSON data containing predictions with frames, boxes, and keypoints
        frames: Dictionary mapping frame_id to frame image

    Returns:
        List of TVFrameResult objects
    """
    predictions = json_data.get('predictions', {})
    frames_data = predictions.get('frames', [])

    results = []
    for frame_data in frames_data:
        frame_id = frame_data.get('frame_id')
        if frame_id not in frames:
            print(f"Warning: Frame {frame_id} not found in extracted frames, skipping")
            continue

        # Convert boxes
        json_boxes = frame_data.get('boxes', [])
        boxes = []
        for box_data in json_boxes:
            box = BoundingBox(
                x1=int(box_data.get('x1', 0)),
                y1=int(box_data.get('y1', 0)),
                x2=int(box_data.get('x2', 0)),
                y2=int(box_data.get('y2', 0)),
                cls_id=int(box_data.get('cls_id', 0)),
                conf=float(box_data.get('conf', 0.0)),
            )
            boxes.append(box)

        # Convert keypoints
        json_keypoints = frame_data.get('keypoints', [])
        keypoints = convert_keypoints_format(json_keypoints)

        result = TVFrameResult(
            frame_id=frame_id,
            boxes=boxes,
            keypoints=keypoints,
        )
        results.append(result)

    return results


def evaluate_keypoints_from_json(
    json_data: dict,
    frames: Dict[int, np.ndarray],
    template_image: np.ndarray,
    template_keypoints: List[Tuple[int, int]],
    visualization_output_dir: Path = None,
) -> Dict[str, float]:
    """
    Evaluate keypoint accuracy from JSON data using the same function as test_predict_batch.py.

    Args:
        json_data: JSON data containing predictions with frames and keypoints
        frames: Dictionary mapping frame_id to frame image
        template_image: Template image for evaluation
        template_keypoints: Template keypoints
        visualization_output_dir: Optional directory to save visualization images

    Returns:
        Dictionary with keypoint evaluation statistics
    """
    # Convert JSON data to TVFrameResult objects
    results = convert_json_to_tvframe_results(json_data, frames)

    if len(results) == 0:
        print("No valid frames found in JSON data")
        return {
            "keypoint_avg_score": 0.0,
            "keypoint_valid_frames": 0,
            "keypoint_total_frames": 0,
        }

    print(f"Evaluating {len(results)} frames using evaluate_keypoints_batch...")

    # Use the same evaluation function as test_predict_batch.py
    stats = evaluate_keypoints_batch(
        results=results,
        original_frames=frames,
        template_image=template_image,
        template_keypoints=template_keypoints,
        visualization_output_dir=visualization_output_dir,
    )

    print("\n=== Keypoint Evaluation Results ===")
    print(f"Total frames: {stats['keypoint_total_frames']}")
    print(f"Valid frames: {stats['keypoint_valid_frames']}")
    print(f"Average score: {stats['keypoint_avg_score']:.3f}")
    print(f"Max score: {stats['keypoint_max_score']:.3f}")
    print(f"Min score: {stats['keypoint_min_score']:.3f}")
    print(f"Frames with score > 0.5: {stats['keypoint_frames_above_0.5']}")
    print(f"Frames with score > 0.7: {stats['keypoint_frames_above_0.7']}")

    return stats


def parse_args() -> argparse.Namespace:
    parser = argparse.ArgumentParser(
        description="Fetch video and keypoint data from URL, evaluate keypoints, and visualize results."
    )
    parser.add_argument(
        "--url",
        type=str,
        default="https://pub-7b4130b6af75472f800371248bca15b6.r2.dev/scorevision/results_soccer/5Fnhz5fDihvno4DfssfRogL84VFvdDRRsgu19grbqEDPbJGv/responses/007115302-f9bd4226d1f4248c782a3179764e3203ce2fc520642eed4f7b02c40e61db55eb.json",
        help="URL to fetch JSON data containing video_url and predictions.",
    )
    parser.add_argument(
        "--template-image",
        type=Path,
        default='football_pitch_template.png',
        help="Path to football pitch template image.",
    )
    parser.add_argument(
        "--output-dir",
        type=Path,
        default='outputs/url_evaluation',
        help="Directory to save visualizations and downloaded video.",
    )
    parser.add_argument(
        "--delete-video",
        action="store_true",
        help="Delete downloaded video file after processing (default: keep video).",
    )
    return parser.parse_args()


def main() -> None:
    args = parse_args()

    # Create output directory
    args.output_dir.mkdir(parents=True, exist_ok=True)

    # Fetch JSON data
    json_data = fetch_json_data(args.url)

    # Get video URL
    video_url = json_data.get('video_url')
    if not video_url:
        raise ValueError("No video_url found in JSON data")

    # Download video
    video_filename = Path(urllib.parse.urlparse(video_url).path).name
    if not video_filename:
        video_filename = "video.mp4"
    video_path = args.output_dir / video_filename

    download_video(video_url, video_path)

    # Get video filename without extension for folder naming
    video_name_without_ext = Path(video_filename).stem

    # Get frame IDs from JSON
    predictions = json_data.get('predictions', {})
    frames_data = predictions.get('frames', [])
    frame_ids = [frame_data.get('frame_id') for frame_data in frames_data]

    # Extract frames from video
    frames = extract_frames_from_video(video_path, frame_ids=frame_ids if frame_ids else None)

    # Load template
    template_image, template_keypoints = load_template_from_file(str(args.template_image))

    # Create visualization directory with video filename
    visualization_dir = args.output_dir / f"visualizations_{video_name_without_ext}"

    # Evaluate keypoints
    stats = evaluate_keypoints_from_json(
        json_data=json_data,
        frames=frames,
        template_image=template_image,
        template_keypoints=template_keypoints,
        visualization_output_dir=visualization_dir,
    )

    # Clean up video if requested
    if args.delete_video:
        video_path.unlink()
        print(f"Deleted video file: {video_path}")
    else:
        print(f"Video saved at: {video_path}")

    print(f"\nResults saved to: {args.output_dir}")
    print(f"Visualizations saved to: {visualization_dir}")


if __name__ == "__main__":
    main()
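For context, the script above is normally invoked as `python evaluate_from_url.py --url <results-json-url> --template-image football_pitch_template.png --output-dir outputs/url_evaluation` (all flags have defaults). Its helpers can also be reused directly; a small sketch using a placeholder results URL and the JSON layout the script itself expects (`video_url` plus `predictions.frames[].frame_id`):

```python
from pathlib import Path

from evaluate_from_url import download_video, extract_frames_from_video, fetch_json_data

data = fetch_json_data("https://example.com/response.json")  # placeholder results URL
video_path = download_video(data["video_url"], Path("outputs/url_evaluation/video.mp4"))
frame_ids = [frame["frame_id"] for frame in data["predictions"]["frames"]]
frames = extract_frames_from_video(video_path, frame_ids=frame_ids)
print(f"{len(frames)} frames extracted and ready for keypoint evaluation")
```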
football_object_detection.pt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:8bbacfcb38e38b1b8816788e9e6e845160533719a0b87b693d58b932380d0d28
size 152961687
football_pitch_template.png
ADDED
hrnetv2_w48.yaml
ADDED
@@ -0,0 +1,35 @@
MODEL:
  IMAGE_SIZE: [960, 540]
  NUM_JOINTS: 58
  PRETRAIN: ''
  EXTRA:
    FINAL_CONV_KERNEL: 1
    STAGE1:
      NUM_MODULES: 1
      NUM_BRANCHES: 1
      BLOCK: BOTTLENECK
      NUM_BLOCKS: [4]
      NUM_CHANNELS: [64]
      FUSE_METHOD: SUM
    STAGE2:
      NUM_MODULES: 1
      NUM_BRANCHES: 2
      BLOCK: BASIC
      NUM_BLOCKS: [4, 4]
      NUM_CHANNELS: [48, 96]
      FUSE_METHOD: SUM
    STAGE3:
      NUM_MODULES: 4
      NUM_BRANCHES: 3
      BLOCK: BASIC
      NUM_BLOCKS: [4, 4, 4]
      NUM_CHANNELS: [48, 96, 192]
      FUSE_METHOD: SUM
    STAGE4:
      NUM_MODULES: 3
      NUM_BRANCHES: 4
      BLOCK: BASIC
      NUM_BLOCKS: [4, 4, 4, 4]
      NUM_CHANNELS: [48, 96, 192, 384]
      FUSE_METHOD: SUM
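The config above describes an HRNetV2-W48 backbone with `NUM_JOINTS: 58` at a 960×540 input. If those values are needed elsewhere (for example to size heatmap buffers), they can simply be read back with PyYAML; a small sketch assuming only the fields shown above:

```python
import yaml  # pip install pyyaml

with open("hrnetv2_w48.yaml") as fh:
    cfg = yaml.safe_load(fh)["MODEL"]

width, height = cfg["IMAGE_SIZE"]
num_joints = cfg["NUM_JOINTS"]
stage4_widths = cfg["EXTRA"]["STAGE4"]["NUM_CHANNELS"]

print(f"input {width}x{height}, {num_joints} joints, stage-4 channel widths {stage4_widths}")
```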
inspect_yolo_model.py
ADDED
@@ -0,0 +1,155 @@
"""
Script to inspect a YOLO .pt model and determine its variant (nano, small, medium, large, xlarge).
"""
import argparse
from pathlib import Path
import torch
from ultralytics import YOLO


def inspect_yolo_model(model_path: Path):
    """Inspect YOLO model to determine variant and architecture details."""
    print(f"Inspecting model: {model_path}")
    print("=" * 60)

    # Method 1: Load with Ultralytics and check metadata
    try:
        model = YOLO(str(model_path))

        # Check model info
        print("\n--- Model Information ---")
        print(f"Model type: {type(model.model)}")

        # Try to get model name from metadata
        if hasattr(model, 'model') and hasattr(model.model, 'yaml'):
            yaml_path = model.model.yaml
            print(f"YAML config: {yaml_path}")
            if yaml_path:
                # Extract variant from yaml path
                yaml_name = Path(yaml_path).stem if isinstance(yaml_path, (str, Path)) else str(yaml_path)
                print(f"YAML name: {yaml_name}")
                # Common patterns: yolo11n.yaml, yolo11s.yaml, yolo11m.yaml, yolo11l.yaml, yolo11x.yaml
                # or yolov8n.yaml, yolov8s.yaml, etc.
                if 'n' in yaml_name.lower():
                    variant = "Nano (n)"
                elif 's' in yaml_name.lower():
                    variant = "Small (s)"
                elif 'm' in yaml_name.lower():
                    variant = "Medium (m)"
                elif 'l' in yaml_name.lower():
                    variant = "Large (l)"
                elif 'x' in yaml_name.lower():
                    variant = "XLarge (x)"
                else:
                    variant = "Unknown"
                print(f"Detected variant: {variant}")

        # Check model metadata if available
        if hasattr(model.model, 'names'):
            print(f"Number of classes: {len(model.model.names)}")
            print(f"Class names: {list(model.model.names.values())[:5]}...")  # Show first 5

        # Get model info summary
        print("\n--- Model Summary ---")
        try:
            info = model.info(verbose=False)
            print(info)
        except:
            pass

        # Count parameters
        if hasattr(model.model, 'parameters'):
            total_params = sum(p.numel() for p in model.model.parameters())
            trainable_params = sum(p.numel() for p in model.model.parameters() if p.requires_grad)
            print(f"\n--- Parameter Count ---")
            print(f"Total parameters: {total_params:,}")
            print(f"Trainable parameters: {trainable_params:,}")

            # Rough estimates for YOLO variants (these vary by version but give a ballpark)
            if total_params < 3_000_000:
                size_estimate = "Nano (n) - typically < 3M params"
            elif total_params < 12_000_000:
                size_estimate = "Small (s) - typically 3-12M params"
            elif total_params < 26_000_000:
                size_estimate = "Medium (m) - typically 12-26M params"
            elif total_params < 44_000_000:
                size_estimate = "Large (l) - typically 26-44M params"
            else:
                size_estimate = "XLarge (x) - typically > 44M params"
            print(f"Size estimate: {size_estimate}")

    except Exception as e:
        print(f"Error loading with Ultralytics: {e}")
        print("\nTrying alternative method...")

    # Method 2: Direct PyTorch inspection
    print("\n" + "=" * 60)
    print("--- Direct PyTorch Inspection ---")
    try:
        checkpoint = torch.load(str(model_path), map_location='cpu')

        # Check for metadata
        if 'model' in checkpoint:
            model_dict = checkpoint['model']
            if isinstance(model_dict, dict):
                # Look for architecture hints in state dict keys
                print("Checking state dict keys for architecture hints...")
                keys = list(model_dict.keys())[:10]  # First 10 keys
                for key in keys:
                    print(f"  {key}")

                # Count layers
                layer_count = len([k for k in model_dict.keys() if 'weight' in k or 'bias' in k])
                print(f"\nTotal weight/bias tensors: {layer_count}")

        # Check checkpoint metadata
        if 'epoch' in checkpoint:
            print(f"Training epoch: {checkpoint.get('epoch', 'N/A')}")
        if 'best_fitness' in checkpoint:
            print(f"Best fitness: {checkpoint.get('best_fitness', 'N/A')}")

        # File size
        file_size_mb = model_path.stat().st_size / (1024 * 1024)
        print(f"\nModel file size: {file_size_mb:.2f} MB")

        # Rough size estimates based on file size (very approximate)
        if file_size_mb < 6:
            size_estimate = "Likely Nano (n) - file < 6MB"
        elif file_size_mb < 22:
            size_estimate = "Likely Small (s) - file 6-22MB"
        elif file_size_mb < 50:
            size_estimate = "Likely Medium (m) - file 22-50MB"
        elif file_size_mb < 85:
            size_estimate = "Likely Large (l) - file 50-85MB"
        else:
            size_estimate = "Likely XLarge (x) - file > 85MB"
        print(f"Size estimate from file: {size_estimate}")

    except Exception as e:
        print(f"Error with direct PyTorch inspection: {e}")

    print("\n" + "=" * 60)
    print("Inspection complete!")


def main():
    parser = argparse.ArgumentParser(
        description="Inspect YOLO .pt model to determine variant"
    )
    parser.add_argument(
        "--model_path",
        type=Path,
        help="Path to YOLO .pt model file"
    )
    args = parser.parse_args()

    if not args.model_path.exists():
        print(f"Error: Model file not found: {args.model_path}")
        return

    inspect_yolo_model(args.model_path)


if __name__ == "__main__":
    main()
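Typical usage against one of the checkpoints in this commit, either through the CLI flag defined above or by importing the function (which checkpoint you point it at is up to you):

```python
# CLI equivalent:  python inspect_yolo_model.py --model_path best.pt
from pathlib import Path

from inspect_yolo_model import inspect_yolo_model

# Prints the YAML-derived variant (when available) plus the parameter-count
# and file-size based estimates described above.
inspect_yolo_model(Path("best.pt"))
```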
keypoint
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:7ea78fa76aaf94976a8eca428d6e3c59697a93430cba1a4603e20284b61f5113
size 264964645
keypoint.pt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:6dd10dba85895c92760cdb5a99c5cfca899c68f361a66c5448f38a187280ee1f
size 6849672
keypoint_evaluation.py
ADDED
|
@@ -0,0 +1,956 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import absolute_import
|
| 2 |
+
from __future__ import division
|
| 3 |
+
from __future__ import print_function
|
| 4 |
+
|
| 5 |
+
import logging
|
| 6 |
+
from typing import List, Tuple, Optional
|
| 7 |
+
from pathlib import Path
|
| 8 |
+
import numpy as np
|
| 9 |
+
from numpy import extract, ndarray, array, float32, uint8
|
| 10 |
+
import copy
|
| 11 |
+
|
| 12 |
+
import cv2
|
| 13 |
+
|
| 14 |
+
# Try to import PyTorch for GPU-accelerated warping
|
| 15 |
+
try:
|
| 16 |
+
import torch
|
| 17 |
+
import torch.nn.functional as F
|
| 18 |
+
TORCH_AVAILABLE = True
|
| 19 |
+
except ImportError:
|
| 20 |
+
TORCH_AVAILABLE = False
|
| 21 |
+
torch = None
|
| 22 |
+
F = None
|
| 23 |
+
|
| 24 |
+
# Import cv2 functions
|
| 25 |
+
bitwise_and = cv2.bitwise_and
|
| 26 |
+
findHomography = cv2.findHomography
|
| 27 |
+
warpPerspective = cv2.warpPerspective
|
| 28 |
+
cvtColor = cv2.cvtColor
|
| 29 |
+
COLOR_BGR2GRAY = cv2.COLOR_BGR2GRAY
|
| 30 |
+
threshold = cv2.threshold
|
| 31 |
+
THRESH_BINARY = cv2.THRESH_BINARY
|
| 32 |
+
getStructuringElement = cv2.getStructuringElement
|
| 33 |
+
MORPH_RECT = cv2.MORPH_RECT
|
| 34 |
+
MORPH_TOPHAT = cv2.MORPH_TOPHAT
|
| 35 |
+
GaussianBlur = cv2.GaussianBlur
|
| 36 |
+
morphologyEx = cv2.morphologyEx
|
| 37 |
+
Canny = cv2.Canny
|
| 38 |
+
connectedComponents = cv2.connectedComponents
|
| 39 |
+
perspectiveTransform = cv2.perspectiveTransform
|
| 40 |
+
RETR_EXTERNAL = cv2.RETR_EXTERNAL
|
| 41 |
+
CHAIN_APPROX_SIMPLE = cv2.CHAIN_APPROX_SIMPLE
|
| 42 |
+
findContours = cv2.findContours
|
| 43 |
+
boundingRect = cv2.boundingRect
|
| 44 |
+
dilate = cv2.dilate
|
| 45 |
+
|
| 46 |
+
logger = logging.getLogger(__name__)
|
| 47 |
+
|
| 48 |
+
# Template keypoints constant - define your keypoints here
|
| 49 |
+
# Format: List of (x, y) tuples representing keypoint coordinates on the template image
|
| 50 |
+
TEMPLATE_KEYPOINTS: list[tuple[int, int]] = [
|
| 51 |
+
(5, 5), # 1
|
| 52 |
+
(5, 140), # 2
|
| 53 |
+
(5, 250), # 3
|
| 54 |
+
(5, 430), # 4
|
| 55 |
+
(5, 540), # 5
|
| 56 |
+
(5, 675), # 6
|
| 57 |
+
# -------------
|
| 58 |
+
(55, 250), # 7
|
| 59 |
+
(55, 430), # 8
|
| 60 |
+
# -------------
|
| 61 |
+
(110, 340), # 9
|
| 62 |
+
# -------------
|
| 63 |
+
(165, 140), # 10
|
| 64 |
+
(165, 270), # 11
|
| 65 |
+
(165, 410), # 12
|
| 66 |
+
(165, 540), # 13
|
| 67 |
+
# -------------
|
| 68 |
+
(527, 5), # 14
|
| 69 |
+
(527, 253), # 15
|
| 70 |
+
(527, 433), # 16
|
| 71 |
+
(527, 675), # 17
|
| 72 |
+
# -------------
|
| 73 |
+
(888, 140), # 18
|
| 74 |
+
(888, 270), # 19
|
| 75 |
+
(888, 410), # 20
|
| 76 |
+
(888, 540), # 21
|
| 77 |
+
# -------------
|
| 78 |
+
(940, 340), # 22
|
| 79 |
+
# -------------
|
| 80 |
+
(998, 250), # 23
|
| 81 |
+
(998, 430), # 24
|
| 82 |
+
# -------------
|
| 83 |
+
(1045, 5), # 25
|
| 84 |
+
(1045, 140), # 26
|
| 85 |
+
(1045, 250), # 27
|
| 86 |
+
(1045, 430), # 28
|
| 87 |
+
(1045, 540), # 29
|
| 88 |
+
(1045, 675), # 30
|
| 89 |
+
# -------------
|
| 90 |
+
(435, 340), # 31
|
| 91 |
+
(615, 340), # 32
|
| 92 |
+
]
|
| 93 |
+
|
| 94 |
+
INDEX_KEYPOINT_CORNER_BOTTOM_LEFT = 5
|
| 95 |
+
INDEX_KEYPOINT_CORNER_BOTTOM_RIGHT = 29
|
| 96 |
+
INDEX_KEYPOINT_CORNER_TOP_LEFT = 0
|
| 97 |
+
INDEX_KEYPOINT_CORNER_TOP_RIGHT = 24
|
| 98 |
+
|
| 99 |
+
|
| 100 |
+
class InvalidMask(Exception):
|
| 101 |
+
"""Exception raised when mask validation fails."""
|
| 102 |
+
pass
|
| 103 |
+
|
| 104 |
+
|
| 105 |
+
def has_a_wide_line(mask: ndarray, max_aspect_ratio: float = 1.0) -> bool:
|
| 106 |
+
contours, _ = findContours(mask, RETR_EXTERNAL, CHAIN_APPROX_SIMPLE)
|
| 107 |
+
for cnt in contours:
|
| 108 |
+
x, y, w, h = boundingRect(cnt)
|
| 109 |
+
aspect_ratio = min(w, h) / max(w, h)
|
| 110 |
+
# print(f"Aspect ratio: {aspect_ratio}, width: {w}, height: {h}")
|
| 111 |
+
if aspect_ratio >= max_aspect_ratio:
|
| 112 |
+
return True
|
| 113 |
+
return False
|
| 114 |
+
|
| 115 |
+
|
| 116 |
+
def is_bowtie(points: ndarray) -> bool:
|
| 117 |
+
def segments_intersect(p1: int, p2: int, q1: int, q2: int) -> bool:
|
| 118 |
+
def ccw(a: int, b: int, c: int):
|
| 119 |
+
return (c[1] - a[1]) * (b[0] - a[0]) > (b[1] - a[1]) * (c[0] - a[0])
|
| 120 |
+
|
| 121 |
+
return (ccw(p1, q1, q2) != ccw(p2, q1, q2)) and (
|
| 122 |
+
ccw(p1, p2, q1) != ccw(p1, p2, q2)
|
| 123 |
+
)
|
| 124 |
+
|
| 125 |
+
pts = points.reshape(-1, 2)
|
| 126 |
+
edges = [(pts[0], pts[1]), (pts[1], pts[2]), (pts[2], pts[3]), (pts[3], pts[0])]
|
| 127 |
+
return segments_intersect(*edges[0], *edges[2]) or segments_intersect(
|
| 128 |
+
*edges[1], *edges[3]
|
| 129 |
+
)
|
| 130 |
+
|
| 131 |
+
def validate_mask_lines(mask: ndarray) -> None:
|
| 132 |
+
if mask.sum() == 0:
|
| 133 |
+
raise InvalidMask("No projected lines")
|
| 134 |
+
if mask.sum() == mask.size:
|
| 135 |
+
raise InvalidMask("Projected lines cover the entire image surface")
|
| 136 |
+
if has_a_wide_line(mask=mask):
|
| 137 |
+
raise InvalidMask("A projected line is too wide")
|
| 138 |
+
|
| 139 |
+
|
| 140 |
+
def validate_mask_ground(mask: ndarray) -> None:
|
| 141 |
+
num_labels, _ = connectedComponents(mask)
|
| 142 |
+
num_distinct_regions = num_labels - 1
|
| 143 |
+
if num_distinct_regions > 1:
|
| 144 |
+
raise InvalidMask(
|
| 145 |
+
f"Projected ground should be a single object, detected {num_distinct_regions}"
|
| 146 |
+
)
|
| 147 |
+
area_covered = mask.sum() / mask.size
|
| 148 |
+
if area_covered >= 0.9:
|
| 149 |
+
raise InvalidMask(
|
| 150 |
+
f"Projected ground covers more than {area_covered:.2f}% of the image surface which is unrealistic"
|
| 151 |
+
)
|
| 152 |
+
|
| 153 |
+
|
| 154 |
+
def validate_projected_corners(
|
| 155 |
+
source_keypoints: list[tuple[int, int]], homography_matrix: ndarray
|
| 156 |
+
) -> None:
|
| 157 |
+
src_corners = array(
|
| 158 |
+
[
|
| 159 |
+
source_keypoints[INDEX_KEYPOINT_CORNER_BOTTOM_LEFT],
|
| 160 |
+
source_keypoints[INDEX_KEYPOINT_CORNER_BOTTOM_RIGHT],
|
| 161 |
+
source_keypoints[INDEX_KEYPOINT_CORNER_TOP_RIGHT],
|
| 162 |
+
source_keypoints[INDEX_KEYPOINT_CORNER_TOP_LEFT],
|
| 163 |
+
],
|
| 164 |
+
dtype="float32",
|
| 165 |
+
)[None, :, :]
|
| 166 |
+
|
| 167 |
+
warped_corners = perspectiveTransform(src_corners, homography_matrix)[0]
|
| 168 |
+
|
| 169 |
+
if is_bowtie(warped_corners):
|
| 170 |
+
raise InvalidMask("Projection twisted!")
|
| 171 |
+
|
| 172 |
+
|
| 173 |
+
def project_image_using_keypoints(
|
| 174 |
+
image: ndarray,
|
| 175 |
+
source_keypoints: List[Tuple[int, int]],
|
| 176 |
+
destination_keypoints: List[Tuple[int, int]],
|
| 177 |
+
destination_width: int,
|
| 178 |
+
destination_height: int,
|
| 179 |
+
inverse: bool = False,
|
| 180 |
+
) -> ndarray:
|
| 181 |
+
"""Project image using homography from source to destination keypoints."""
|
| 182 |
+
filtered_src = []
|
| 183 |
+
filtered_dst = []
|
| 184 |
+
|
| 185 |
+
for src_pt, dst_pt in zip(source_keypoints, destination_keypoints):
|
| 186 |
+
if dst_pt[0] == 0.0 and dst_pt[1] == 0.0: # ignore default / missing points
|
| 187 |
+
continue
|
| 188 |
+
filtered_src.append(src_pt)
|
| 189 |
+
filtered_dst.append(dst_pt)
|
| 190 |
+
|
| 191 |
+
if len(filtered_src) < 4:
|
| 192 |
+
raise ValueError("At least 4 valid keypoints are required for homography.")
|
| 193 |
+
|
| 194 |
+
source_points = array(filtered_src, dtype=float32)
|
| 195 |
+
destination_points = array(filtered_dst, dtype=float32)
|
| 196 |
+
|
| 197 |
+
if inverse:
|
| 198 |
+
result = findHomography(destination_points, source_points)
|
| 199 |
+
if result is None:
|
| 200 |
+
raise ValueError("Failed to compute inverse homography.")
|
| 201 |
+
H_inv, _ = result
|
| 202 |
+
return warpPerspective(image, H_inv, (destination_width, destination_height))
|
| 203 |
+
|
| 204 |
+
result = findHomography(source_points, destination_points)
|
| 205 |
+
if result is None:
|
| 206 |
+
raise ValueError("Failed to compute homography.")
|
| 207 |
+
H, _ = result
|
| 208 |
+
projected_image = warpPerspective(image, H, (destination_width, destination_height))
|
| 209 |
+
|
| 210 |
+
validate_projected_corners(source_keypoints=source_keypoints, homography_matrix=H)
|
| 211 |
+
return projected_image
|
| 212 |
+
|
| 213 |
+
|
| 214 |
+
def extract_masks_for_ground_and_lines(
|
| 215 |
+
image: ndarray,
|
| 216 |
+
) -> Tuple[ndarray, ndarray]:
|
| 217 |
+
"""Extract masks for ground (gray) and lines (white) from template image."""
|
| 218 |
+
gray = cvtColor(image, COLOR_BGR2GRAY)
|
| 219 |
+
_, mask_ground = threshold(gray, 10, 255, THRESH_BINARY)
|
| 220 |
+
_, mask_lines = threshold(gray, 200, 255, THRESH_BINARY)
|
| 221 |
+
mask_ground_binary = (mask_ground > 0).astype(uint8)
|
| 222 |
+
mask_lines_binary = (mask_lines > 0).astype(uint8)
|
| 223 |
+
validate_mask_ground(mask=mask_ground_binary)
|
| 224 |
+
validate_mask_lines(mask=mask_lines_binary)
|
| 225 |
+
return mask_ground_binary, mask_lines_binary
|
| 226 |
+
|
| 227 |
+
|
| 228 |
+
def extract_masks_for_ground_and_lines_no_validation(
|
| 229 |
+
image: ndarray,
|
| 230 |
+
) -> Tuple[ndarray, ndarray]:
|
| 231 |
+
"""
|
| 232 |
+
Extract masks for ground (gray) and lines (white) from template image WITHOUT validation.
|
| 233 |
+
This is useful for line distribution analysis where exact fitting might create invalid masks
|
| 234 |
+
but we still want to analyze where lines are located.
|
| 235 |
+
"""
|
| 236 |
+
gray = cvtColor(image, COLOR_BGR2GRAY)
|
| 237 |
+
_, mask_ground = threshold(gray, 10, 255, THRESH_BINARY)
|
| 238 |
+
_, mask_lines = threshold(gray, 200, 255, THRESH_BINARY)
|
| 239 |
+
mask_ground_binary = (mask_ground > 0).astype(uint8)
|
| 240 |
+
mask_lines_binary = (mask_lines > 0).astype(uint8)
|
| 241 |
+
# No validation - return masks as-is
|
| 242 |
+
return mask_ground_binary, mask_lines_binary
|
| 243 |
+
|
| 244 |
+
|
| 245 |
+
def extract_mask_of_ground_lines_in_image(
    image: ndarray,
    ground_mask: ndarray,
    blur_ksize: int = 5,
    canny_low: int = 30,
    canny_high: int = 100,
    use_tophat: bool = True,
    dilate_kernel_size: int = 3,
    dilate_iterations: int = 3,
) -> ndarray:
    """Extract line mask from image using edge detection on ground region."""
    gray = cvtColor(image, COLOR_BGR2GRAY)

    if use_tophat:
        kernel = getStructuringElement(MORPH_RECT, (31, 31))
        gray = morphologyEx(gray, MORPH_TOPHAT, kernel)

    if blur_ksize and blur_ksize % 2 == 1:
        gray = GaussianBlur(gray, (blur_ksize, blur_ksize), 0)

    image_edges = Canny(gray, canny_low, canny_high)
    image_edges_on_ground = bitwise_and(image_edges, image_edges, mask=ground_mask)

    if dilate_kernel_size > 1:
        dilate_kernel = getStructuringElement(
            MORPH_RECT, (dilate_kernel_size, dilate_kernel_size)
        )
        image_edges_on_ground = dilate(
            image_edges_on_ground, dilate_kernel, iterations=dilate_iterations
        )

    return (image_edges_on_ground > 0).astype(uint8)

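# Self-contained sketch of the same top-hat -> blur -> Canny -> ground-mask -> dilate
# chain on a synthetic frame (parameter values mirror the defaults above; the frame
# and mask are hypothetical).
#
# import cv2
# import numpy as np
#
# frame = np.full((200, 300, 3), 60, dtype=np.uint8)
# cv2.line(frame, (20, 100), (280, 100), (240, 240, 240), 3)   # a bright pitch line
# ground_mask = np.ones((200, 300), dtype=np.uint8)            # assume the whole frame is pitch
#
# gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
# gray = cv2.morphologyEx(gray, cv2.MORPH_TOPHAT, cv2.getStructuringElement(cv2.MORPH_RECT, (31, 31)))
# gray = cv2.GaussianBlur(gray, (5, 5), 0)
# edges = cv2.Canny(gray, 30, 100)
# edges = cv2.bitwise_and(edges, edges, mask=ground_mask)
# edges = cv2.dilate(edges, cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3)), iterations=3)
# line_mask = (edges > 0).astype(np.uint8)
# print(line_mask.sum() > 0)  # True: the line's edges survive the pipeline
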
def evaluate_keypoints_for_frame(
    template_keypoints: List[Tuple[int, int]],
    frame_keypoints: List[Tuple[int, int]],
    frame: ndarray,
    floor_markings_template: ndarray,
) -> float:
    """
    Evaluate keypoint accuracy for a single frame.

    Returns a score between 0.0 and 1.0 based on the overlap between
    projected template lines and detected lines in the frame.
    """
    try:
        warped_template = project_image_using_keypoints(
            image=floor_markings_template,
            source_keypoints=template_keypoints,
            destination_keypoints=frame_keypoints,
            destination_width=frame.shape[1],
            destination_height=frame.shape[0],
        )

        mask_ground, mask_lines_expected = extract_masks_for_ground_and_lines(
            image=warped_template
        )

        mask_lines_predicted = extract_mask_of_ground_lines_in_image(
            image=frame, ground_mask=mask_ground
        )

        pixels_overlapping = bitwise_and(
            mask_lines_expected, mask_lines_predicted
        ).sum()

        pixels_on_lines = mask_lines_expected.sum()

        score = pixels_overlapping / (pixels_on_lines + 1e-8)

        return min(1.0, max(0.0, score))  # Clamp to [0, 1]

    except (InvalidMask, ValueError) as e:
        print(f'InvalidMask or ValueError in keypoint evaluation: {e}')
        return 0.0
    except Exception as e:
        print(f'Unexpected error in keypoint evaluation: {e}')
        return 0.0

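# Toy illustration of the overlap score used above: the fraction of expected line
# pixels that are also present in the predicted line mask (values are made up).
#
# import numpy as np
#
# mask_lines_expected = np.array([[1, 1, 0, 0],
#                                 [0, 1, 1, 0]], dtype=np.uint8)
# mask_lines_predicted = np.array([[1, 0, 0, 0],
#                                  [0, 1, 1, 1]], dtype=np.uint8)
#
# overlap = np.bitwise_and(mask_lines_expected, mask_lines_predicted).sum()  # 3
# expected = mask_lines_expected.sum()                                       # 4
# score = overlap / (expected + 1e-8)
# print(round(score, 2))  # 0.75
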
def warp_image_pytorch(
    image: ndarray,
    homography_matrix: ndarray,
    output_width: int,
    output_height: int,
    device: str = "cuda",
) -> ndarray:
    """
    Warp image using PyTorch (GPU-accelerated) instead of cv2.warpPerspective.

    Args:
        image: Input image to warp (H, W, C) numpy array
        homography_matrix: 3x3 homography matrix
        output_width: Output image width
        output_height: Output image height
        device: "cuda" or "cpu"

    Returns:
        Warped image as numpy array
    """
    if not TORCH_AVAILABLE:
        # Fallback to OpenCV if PyTorch not available
        return warpPerspective(image, homography_matrix, (output_width, output_height))

    # Auto-detect device
    if device == "cuda" and (not torch.cuda.is_available()):
        device = "cpu"

    try:
        # Convert to tensor and move to device
        image_tensor = torch.from_numpy(image).to(device).float()
        H = torch.from_numpy(homography_matrix).to(device).float()

        # Get image dimensions
        h, w = image.shape[:2]
        if len(image.shape) == 2:
            # Grayscale
            image_tensor = image_tensor.unsqueeze(2)  # Add channel dimension
            channels = 1
        else:
            channels = image.shape[2]

        # Create coordinate grid for output image
        y_coords, x_coords = torch.meshgrid(
            torch.arange(0, output_height, device=device, dtype=torch.float32),
            torch.arange(0, output_width, device=device, dtype=torch.float32),
            indexing='ij'
        )

        # Apply inverse homography to get source coordinates
        ones = torch.ones_like(x_coords)
        coords = torch.stack([x_coords.flatten(), y_coords.flatten(), ones.flatten()], dim=0)
        H_inv = torch.inverse(H)
        src_coords = H_inv @ coords
        src_coords = src_coords[:2] / (src_coords[2:3] + 1e-8)

        # Reshape and normalize to [-1, 1] for grid_sample
        src_x = src_coords[0].reshape(output_height, output_width)
        src_y = src_coords[1].reshape(output_height, output_width)

        # Normalize coordinates to [-1, 1] for grid_sample
        src_x_norm = 2.0 * src_x / (w - 1) - 1.0
        src_y_norm = 2.0 * src_y / (h - 1) - 1.0
        grid = torch.stack([src_x_norm, src_y_norm], dim=-1).unsqueeze(0)  # [1, H, W, 2]

        # Prepare image tensor: [1, C, H, W]
        image_batch = image_tensor.permute(2, 0, 1).unsqueeze(0)

        # Warp using grid_sample
        warped = F.grid_sample(
            image_batch, grid, mode='bilinear', padding_mode='zeros', align_corners=True
        )

        # Convert back to numpy: [H, W, C]
        warped = warped.squeeze(0).permute(1, 2, 0)

        # Remove channel dimension if grayscale
        if channels == 1:
            warped = warped.squeeze(2)

        # Convert to uint8 and return as numpy
        warped_np = warped.cpu().numpy().clip(0, 255).astype(np.uint8)
        return warped_np

    except Exception as e:
        logger.error(f"PyTorch warping failed: {e}, falling back to OpenCV")
        return warpPerspective(image, homography_matrix, (output_width, output_height))

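# Minimal sketch of the grid_sample mechanics used above: an identity sampling grid
# (what an identity homography produces) reproduces the input image exactly.
# Standalone toy example, assuming torch is installed; shapes are made up.
#
# import torch
# import torch.nn.functional as F
#
# h, w = 4, 5
# image = torch.arange(h * w, dtype=torch.float32).reshape(1, 1, h, w)
#
# ys, xs = torch.meshgrid(torch.arange(h, dtype=torch.float32),
#                         torch.arange(w, dtype=torch.float32), indexing='ij')
# grid = torch.stack([2.0 * xs / (w - 1) - 1.0,      # x normalized to [-1, 1]
#                     2.0 * ys / (h - 1) - 1.0], dim=-1).unsqueeze(0)
#
# warped = F.grid_sample(image, grid, mode='bilinear', padding_mode='zeros', align_corners=True)
# print(torch.allclose(warped, image))  # True
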
def evaluate_keypoints_for_frame_gpu(
    template_keypoints: List[Tuple[int, int]],
    frame_keypoints: List[Tuple[int, int]],
    frame: ndarray,
    floor_markings_template: ndarray,
    device: str = "cuda",
) -> float:
    """
    GPU-accelerated keypoint evaluation using PyTorch for warping.

    This function uses PyTorch's grid_sample for GPU-accelerated image warping
    instead of cv2.warpPerspective, making it compatible with PyTorch CUDA.

    Args:
        template_keypoints: Template keypoint coordinates
        frame_keypoints: Frame keypoint coordinates
        frame: Input frame image
        floor_markings_template: Template image
        device: "cuda" or "cpu" (auto-detects if CUDA available)

    Returns:
        Score between 0.0 and 1.0
    """
    if not TORCH_AVAILABLE:
        # Fallback to CPU version if PyTorch not available
        return evaluate_keypoints_for_frame(
            template_keypoints, frame_keypoints, frame, floor_markings_template
        )

    # Auto-detect device
    if device == "cuda" and not torch.cuda.is_available():
        device = "cpu"

    try:
        # Step 1: Compute homography (CPU - small operation)
        filtered_src = []
        filtered_dst = []
        for src_pt, dst_pt in zip(template_keypoints, frame_keypoints):
            if dst_pt[0] == 0.0 and dst_pt[1] == 0.0:
                continue
            filtered_src.append(src_pt)
            filtered_dst.append(dst_pt)

        if len(filtered_src) < 4:
            return 0.0

        source_points = array(filtered_src, dtype=float32)
        destination_points = array(filtered_dst, dtype=float32)
        result = findHomography(source_points, destination_points)
        if result is None:
            return 0.0
        H, _ = result

        # Validate corners
        src_corners = array([
            template_keypoints[INDEX_KEYPOINT_CORNER_BOTTOM_LEFT],
            template_keypoints[INDEX_KEYPOINT_CORNER_BOTTOM_RIGHT],
            template_keypoints[INDEX_KEYPOINT_CORNER_TOP_RIGHT],
            template_keypoints[INDEX_KEYPOINT_CORNER_TOP_LEFT],
        ], dtype=float32)[None, :, :]
        warped_corners = perspectiveTransform(src_corners, H)[0]
        if is_bowtie(warped_corners):
            return 0.0

        # Step 2: Warp template using PyTorch (GPU-accelerated)
        h, w = frame.shape[:2]
        warped_template = warp_image_pytorch(
            floor_markings_template,
            H,
            w,
            h,
            device=device
        )

        # Step 3: Extract masks (CPU - OpenCV operations)
        mask_ground, mask_lines_expected = extract_masks_for_ground_and_lines(
            image=warped_template
        )

        mask_lines_predicted = extract_mask_of_ground_lines_in_image(
            image=frame, ground_mask=mask_ground
        )

        # Step 4: Compute overlap
        pixels_overlapping = bitwise_and(
            mask_lines_expected, mask_lines_predicted
        ).sum()

        pixels_on_lines = mask_lines_expected.sum()

        score = pixels_overlapping / (pixels_on_lines + 1e-8)
        return min(1.0, max(0.0, score))

    except (InvalidMask, ValueError) as e:
        logger.debug(f"Keypoint evaluation failed: {e}")
        return 0.0
    except Exception as e:
        logger.error(f"GPU evaluation failed: {e}, falling back to CPU")
        return evaluate_keypoints_for_frame(
            template_keypoints, frame_keypoints, frame, floor_markings_template
        )

# Cache for template GpuMat to avoid re-uploading on every frame
_template_gpumat_cache = None
_template_cache_key = None
_cuda_available_cache = None
_cuda_module_cache = None
_frame_gpumat_reusable = None  # Reusable GpuMat for frames (same size)
_frame_gpumat_size = None  # Size of the reusable frame GpuMat


def evaluate_keypoints_for_frame_opencv_cuda(
    template_keypoints: List[Tuple[int, int]],
    frame_keypoints: List[Tuple[int, int]],
    frame: ndarray,
    floor_markings_template: ndarray,
    device: str = "cuda",
) -> float:
    """
    GPU-accelerated version using OpenCV CUDA (if available).
    Falls back to CPU if CUDA not available.

    Note: opencv-python-headless doesn't include CUDA support, so this will
    always fall back to CPU. Use evaluate_keypoints_for_frame_gpu for PyTorch GPU acceleration.

    Optimizations:
    - Template GpuMat is cached to avoid re-uploading
    - CUDA availability check is cached
    - Frame GpuMat is reused when frame size matches
    - Keypoint filtering optimized with list comprehension

    Args:
        device: Ignored (kept for compatibility). OpenCV CUDA check is automatic.
    """
    global _template_gpumat_cache, _template_cache_key
    global _cuda_available_cache, _cuda_module_cache, _frame_gpumat_reusable, _frame_gpumat_size

    # Cache CUDA availability check (only check once)
    if _cuda_available_cache is None:
        cuda_available = False
        cuda = None
        try:
            import cv2.cuda as cuda
            # Check if cv2.cuda actually has CUDA functions (not just a stub)
            if hasattr(cuda, 'warpPerspective'):
                # Try to create a GpuMat to verify CUDA is actually working
                try:
                    test_mat = cuda.GpuMat()
                    test_mat.upload(np.zeros((10, 10, 3), dtype=np.uint8))
                    cuda_available = True
                except (AttributeError, Exception):
                    # GpuMat exists but doesn't work (stub module)
                    cuda_available = False
        except (ImportError, AttributeError):
            cuda_available = False

        _cuda_available_cache = cuda_available
        _cuda_module_cache = cuda
    else:
        cuda_available = _cuda_available_cache
        cuda = _cuda_module_cache

    # Always use CPU version since opencv-python-headless doesn't have CUDA
    # The check above will fail, so we fall back to CPU
    if not cuda_available:
        # Use CPU version (this is what will happen with opencv-python-headless)
        return evaluate_keypoints_for_frame(
            template_keypoints, frame_keypoints, frame, floor_markings_template
        )

    # If we get here, OpenCV CUDA is actually available (unlikely with opencv-python-headless)
    try:
        # Create cache key based on template image shape and a fast checksum
        # Using shape + sum of corner pixels for fast comparison (much faster than full hash)
        template_shape = floor_markings_template.shape
        # Quick checksum: sum of corner pixels (fast to compute)
        checksum = (
            int(floor_markings_template[0, 0].sum()) +
            int(floor_markings_template[0, -1].sum()) +
            int(floor_markings_template[-1, 0].sum()) +
            int(floor_markings_template[-1, -1].sum())
        )
        current_cache_key = (template_shape, checksum)

        # Check if we need to update the cached GpuMat
        if _template_gpumat_cache is None or _template_cache_key != current_cache_key:
            # Upload template to GPU (only once or when template changes)
            _template_gpumat_cache = cuda.GpuMat()
            _template_gpumat_cache.upload(floor_markings_template)
            _template_cache_key = current_cache_key

        # Optimize frame upload: reuse GpuMat if frame size matches
        h, w = frame.shape[:2]
        frame_shape = (h, w)
        if _frame_gpumat_reusable is None or _frame_gpumat_size != frame_shape:
            _frame_gpumat_reusable = cuda.GpuMat()
            _frame_gpumat_size = frame_shape
        gpu_frame = _frame_gpumat_reusable
        gpu_frame.upload(frame)

        # Use cached template GpuMat
        gpu_template = _template_gpumat_cache

        # Optimize keypoint filtering with list comprehension (faster than loop)
        filtered_pairs = [(src_pt, dst_pt) for src_pt, dst_pt in zip(template_keypoints, frame_keypoints)
                          if not (dst_pt[0] == 0.0 and dst_pt[1] == 0.0)]

        if len(filtered_pairs) < 4:
            return 0.0

        # Unpack filtered pairs
        filtered_src, filtered_dst = zip(*filtered_pairs)

        # Compute homography (CPU - small operation, fast)
        source_points = array(filtered_src, dtype=float32)
        destination_points = array(filtered_dst, dtype=float32)
        result = findHomography(source_points, destination_points)
        if result is None:
            return 0.0
        H, _ = result

        # Warp on GPU
        gpu_warped = cuda.warpPerspective(gpu_template, H, (w, h))

        # Download for mask extraction (unavoidable - mask extraction uses CPU OpenCV)
        warped_template = gpu_warped.download()

        # Rest of the pipeline (CPU operations - these are fast)
        mask_ground, mask_lines_expected = extract_masks_for_ground_and_lines(warped_template)
        mask_lines_predicted = extract_mask_of_ground_lines_in_image(frame, mask_ground)

        # Overlap computation (using cv2.bitwise_and for consistency)
        pixels_overlapping = bitwise_and(mask_lines_expected, mask_lines_predicted).sum()
        pixels_on_lines = mask_lines_expected.sum()
        score = pixels_overlapping / (pixels_on_lines + 1e-8)
        return min(1.0, max(0.0, score))

    except Exception as e:
        logger.error(f"OpenCV CUDA evaluation failed: {e}, falling back to CPU")
        return evaluate_keypoints_for_frame(
            template_keypoints, frame_keypoints, frame, floor_markings_template
        )

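# Quick way to check whether a given OpenCV build actually ships CUDA support
# (the opencv-python / opencv-python-headless wheels normally do not). Sketch only;
# output depends on the local build.
#
# import cv2
#
# try:
#     n_devices = cv2.cuda.getCudaEnabledDeviceCount()
# except (AttributeError, cv2.error):
#     n_devices = 0
# print(f"OpenCV CUDA devices: {n_devices}")  # 0 on CUDA-less builds
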
def evaluate_keypoints_batch_gpu(
    template_keypoints: List[Tuple[int, int]],
    frame_keypoints_list: List[List[Tuple[int, int]]],
    frames: List[ndarray],
    floor_markings_template: ndarray,
    device: str = "cuda",
) -> List[float]:
    """
    Batch GPU-accelerated keypoint evaluation for multiple frames simultaneously.

    This function processes multiple frames in parallel using PyTorch batch operations,
    which is much faster than evaluating frames one-by-one.

    Args:
        template_keypoints: Template keypoint coordinates (same for all frames)
        frame_keypoints_list: List of frame keypoint coordinates (one per frame)
        frames: List of frame images (numpy arrays)
        floor_markings_template: Template image
        device: "cuda" or "cpu"

    Returns:
        List of scores (one per frame) between 0.0 and 1.0
    """
    if not TORCH_AVAILABLE:
        # Fallback to sequential CPU evaluation
        return [
            evaluate_keypoints_for_frame(
                template_keypoints, kp, frame, floor_markings_template
            )
            for kp, frame in zip(frame_keypoints_list, frames)
        ]

    # Auto-detect device
    if device == "cuda" and not torch.cuda.is_available():
        device = "cpu"

    batch_size = len(frames)
    if batch_size == 0:
        return []

    # Get frame dimensions (assuming all frames have same size)
    h, w = frames[0].shape[:2]

    try:
        # Step 1: Compute homographies for all frames (CPU - vectorized where possible)
        homographies = []
        valid_indices = []

        for idx, (frame_keypoints, frame) in enumerate(zip(frame_keypoints_list, frames)):
            # Filter keypoints
            filtered_pairs = [(src_pt, dst_pt) for src_pt, dst_pt in zip(template_keypoints, frame_keypoints)
                              if not (dst_pt[0] == 0.0 and dst_pt[1] == 0.0)]

            if len(filtered_pairs) < 4:
                continue

            filtered_src, filtered_dst = zip(*filtered_pairs)
            source_points = array(filtered_src, dtype=float32)
            destination_points = array(filtered_dst, dtype=float32)
            result = findHomography(source_points, destination_points)
            if result is None:
                continue
            H, _ = result

            # Validate corners
            src_corners = array([
                template_keypoints[INDEX_KEYPOINT_CORNER_BOTTOM_LEFT],
                template_keypoints[INDEX_KEYPOINT_CORNER_BOTTOM_RIGHT],
                template_keypoints[INDEX_KEYPOINT_CORNER_TOP_RIGHT],
                template_keypoints[INDEX_KEYPOINT_CORNER_TOP_LEFT],
            ], dtype=float32)[None, :, :]
            warped_corners = perspectiveTransform(src_corners, H)[0]
            if not is_bowtie(warped_corners):
                homographies.append(H)
                valid_indices.append(idx)

        if len(homographies) == 0:
            return [0.0] * batch_size

        # Step 2: Batch warp using PyTorch (much faster than sequential)
        template_tensor = torch.from_numpy(floor_markings_template).to(device).float()
        t_h, t_w = floor_markings_template.shape[:2]

        if len(floor_markings_template.shape) == 2:
            template_tensor = template_tensor.unsqueeze(2)
            t_channels = 1
        else:
            t_channels = floor_markings_template.shape[2]

        # Prepare template batch: [B, C, H, W]
        template_batch = template_tensor.permute(2, 0, 1).unsqueeze(0).repeat(len(homographies), 1, 1, 1)

        # Create coordinate grids for all frames
        y_coords, x_coords = torch.meshgrid(
            torch.arange(0, h, device=device, dtype=torch.float32),
            torch.arange(0, w, device=device, dtype=torch.float32),
            indexing='ij'
        )
        ones = torch.ones_like(x_coords)
        coords = torch.stack([x_coords.flatten(), y_coords.flatten(), ones.flatten()], dim=0)  # [3, H*W]

        # Batch process homographies
        H_tensors = torch.from_numpy(np.stack(homographies)).to(device).float()  # [B, 3, 3]
        H_inv_batch = torch.inverse(H_tensors)  # [B, 3, 3]

        # Apply inverse homography for each frame: [B, 3, 3] @ [3, H*W] -> [B, 3, H*W]
        coords_expanded = coords.unsqueeze(0).expand(len(homographies), -1, -1)  # [B, 3, H*W]
        src_coords_batch = torch.bmm(H_inv_batch, coords_expanded)  # [B, 3, H*W]
        src_coords_batch = src_coords_batch[:, :2] / (src_coords_batch[:, 2:3] + 1e-8)  # [B, 2, H*W]

        # Reshape and normalize to [-1, 1] for grid_sample
        src_x_batch = src_coords_batch[:, 0].reshape(len(homographies), h, w)
        src_y_batch = src_coords_batch[:, 1].reshape(len(homographies), h, w)
        src_x_norm = 2.0 * src_x_batch / (t_w - 1) - 1.0
        src_y_norm = 2.0 * src_y_batch / (t_h - 1) - 1.0
        grid_batch = torch.stack([src_x_norm, src_y_norm], dim=-1)  # [B, H, W, 2]

        # Batch warp using grid_sample (all frames at once!)
        warped_batch = F.grid_sample(
            template_batch, grid_batch, mode='bilinear', padding_mode='zeros', align_corners=True
        )  # [B, C, H, W]

        # Convert back to numpy: [B, H, W, C]
        warped_batch = warped_batch.permute(0, 2, 3, 1)
        if t_channels == 1:
            warped_batch = warped_batch.squeeze(3)
        warped_templates = warped_batch.cpu().numpy().clip(0, 255).astype(np.uint8)

        # Step 3: Batch mask extraction and evaluation on GPU
        scores = [0.0] * batch_size

        # Convert to tensors for batch processing
        warped_templates_tensor = torch.from_numpy(warped_templates).to(device).float()
        frames_tensor = torch.from_numpy(np.stack([frames[i] for i in valid_indices])).to(device).float()

        # Batch extract masks for warped templates (GPU)
        # Convert to grayscale
        if len(warped_templates_tensor.shape) == 4:  # [B, H, W, C]
            gray_templates = (warped_templates_tensor[:, :, :, 0] * 0.299 +
                              warped_templates_tensor[:, :, :, 1] * 0.587 +
                              warped_templates_tensor[:, :, :, 2] * 0.114)
        else:
            gray_templates = warped_templates_tensor

        # Threshold for ground and lines (batch operation)
        mask_ground_batch = (gray_templates > 10.0).float()  # [B, H, W]
        mask_lines_expected_batch = (gray_templates > 200.0).float()  # [B, H, W]

        # Batch extract predicted lines from frames (GPU)
        if len(frames_tensor.shape) == 4:  # [B, H, W, C]
            gray_frames = (frames_tensor[:, :, :, 0] * 0.299 +
                           frames_tensor[:, :, :, 1] * 0.587 +
                           frames_tensor[:, :, :, 2] * 0.114)
        else:
            gray_frames = frames_tensor

        # Simplified edge detection (batch Sobel)
        # Sobel kernels
        sobel_x = torch.tensor([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]],
                               device=device, dtype=torch.float32).unsqueeze(0).unsqueeze(0)
        sobel_y = torch.tensor([[-1, -2, -1], [0, 0, 0], [1, 2, 1]],
                               device=device, dtype=torch.float32).unsqueeze(0).unsqueeze(0)

        # Apply Sobel to batch
        gray_frames_batch = gray_frames.unsqueeze(1)  # [B, 1, H, W]
        grad_x_batch = F.conv2d(gray_frames_batch, sobel_x, padding=1)
        grad_y_batch = F.conv2d(gray_frames_batch, sobel_y, padding=1)
        magnitude_batch = torch.sqrt(grad_x_batch.squeeze(1) ** 2 + grad_y_batch.squeeze(1) ** 2 + 1e-8)
        edges_batch = (magnitude_batch > 30.0).float()  # [B, H, W]

        # Apply ground mask
        mask_lines_predicted_batch = edges_batch * mask_ground_batch

        # Batch overlap computation (all on GPU!)
        pixels_overlapping_batch = (mask_lines_expected_batch * mask_lines_predicted_batch).sum(dim=(1, 2))  # [B]
        pixels_on_lines_batch = mask_lines_expected_batch.sum(dim=(1, 2))  # [B]
        scores_batch = (pixels_overlapping_batch / (pixels_on_lines_batch + 1e-8)).cpu().numpy()

        # Fill in scores for valid indices
        for batch_idx, valid_idx in enumerate(valid_indices):
            scores[valid_idx] = min(1.0, max(0.0, float(scores_batch[batch_idx])))

        return scores

    except Exception as e:
        logger.error(f"Batch GPU evaluation failed: {e}, falling back to sequential CPU")
        return [
            evaluate_keypoints_for_frame(
                template_keypoints, kp, frame, floor_markings_template
            )
            for kp, frame in zip(frame_keypoints_list, frames)
        ]

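# Standalone sketch of the batched inverse-homography mapping used above: a single
# torch.bmm maps one pixel grid through B different homographies at once. Toy values,
# assuming torch is installed.
#
# import torch
#
# B, h, w = 2, 3, 4
# H_batch = torch.eye(3).repeat(B, 1, 1)   # [B, 3, 3] identity homographies
# H_batch[1, 0, 2] = 10.0                  # second one shifts x by +10
#
# ys, xs = torch.meshgrid(torch.arange(h, dtype=torch.float32),
#                         torch.arange(w, dtype=torch.float32), indexing='ij')
# coords = torch.stack([xs.flatten(), ys.flatten(), torch.ones(h * w)], dim=0)      # [3, h*w]
#
# src = torch.bmm(torch.inverse(H_batch), coords.unsqueeze(0).expand(B, -1, -1))    # [B, 3, h*w]
# src = src[:, :2] / (src[:, 2:3] + 1e-8)
# print(src[0, 0, :4])  # approx [0, 1, 2, 3]      - identity leaves x unchanged
# print(src[1, 0, :4])  # approx [-10, -9, -8, -7] - inverse of the +10 shift moves x back
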
def evaluate_keypoints_batch_for_frame(
    template_keypoints: List[Tuple[int, int]],
    frame_keypoints_list: List[List[Tuple[int, int]]],
    frame: ndarray,
    floor_markings_template: ndarray,
    device: str = "cuda",
    batch_size: int = 32,
) -> List[float]:
    """
    Fast batch GPU evaluation of multiple keypoint sets for a single frame.

    This function evaluates multiple keypoint sets (e.g., from different models)
    for the same frame using batch GPU processing, which is much faster than
    evaluating them sequentially.

    Args:
        template_keypoints: Template keypoint coordinates
        frame_keypoints_list: List of frame keypoint coordinate sets to evaluate
        frame: Single frame image (same for all keypoint sets)
        floor_markings_template: Template image
        device: "cuda" or "cpu"
        batch_size: Number of keypoint sets to process in each GPU batch

    Returns:
        List of scores (one per keypoint set) between 0.0 and 1.0
    """
    if len(frame_keypoints_list) == 0:
        return []

    if len(frame_keypoints_list) == 1:
        # Single evaluation - use regular function
        return [evaluate_keypoints_for_frame_opencv_cuda(
            template_keypoints=template_keypoints,
            frame_keypoints=frame_keypoints_list[0],
            frame=frame,
            floor_markings_template=floor_markings_template,
            device=device
        )]

    # For multiple keypoint sets, use batch processing
    # Create list of frames (same frame repeated)
    frames_list = [frame] * len(frame_keypoints_list)

    # Use batch GPU evaluation
    try:
        scores = evaluate_keypoints_batch_gpu(
            template_keypoints=template_keypoints,
            frame_keypoints_list=frame_keypoints_list,
            frames=frames_list,
            floor_markings_template=floor_markings_template,
            device=device,
        )
        return scores
    except Exception as e:
        logger.warning(f"Batch GPU evaluation failed: {e}, falling back to sequential")
        # Fallback to sequential evaluation
        scores = []
        for frame_keypoints in frame_keypoints_list:
            try:
                score = evaluate_keypoints_for_frame_opencv_cuda(
                    template_keypoints=template_keypoints,
                    frame_keypoints=frame_keypoints,
                    frame=frame,
                    floor_markings_template=floor_markings_template,
                    device=device
                )
                scores.append(score)
            except Exception as e2:
                logger.debug(f"Error evaluating keypoints: {e2}")
                scores.append(0.0)
        return scores

def load_template_from_file(
    template_image_path: str,
) -> Tuple[ndarray, List[Tuple[int, int]]]:
    """
    Load template image and use TEMPLATE_KEYPOINTS constant for keypoints.

    Args:
        template_image_path: Path to template image file

    Returns:
        template_image: Loaded template image
        template_keypoints: List of (x, y) keypoint coordinates from TEMPLATE_KEYPOINTS constant
    """
    # Load template image
    template_image = cv2.imread(template_image_path)
    if template_image is None:
        raise ValueError(f"Could not load template image from {template_image_path}")

    # Use TEMPLATE_KEYPOINTS constant
    if len(TEMPLATE_KEYPOINTS) == 0:
        raise ValueError(
            "TEMPLATE_KEYPOINTS constant is empty. Please define keypoints in keypoint_evaluation.py"
        )

    if len(TEMPLATE_KEYPOINTS) < 4:
        raise ValueError(f"TEMPLATE_KEYPOINTS must have at least 4 keypoints, found {len(TEMPLATE_KEYPOINTS)}")

    logger.info(f"Loaded template image: {template_image_path}")
    logger.info(f"Using TEMPLATE_KEYPOINTS constant with {len(TEMPLATE_KEYPOINTS)} keypoints")

    return template_image, TEMPLATE_KEYPOINTS

keypoint_helper.py
ADDED
@@ -0,0 +1,115 @@
import numpy as np
from typing import List, Tuple, Sequence, Any

FOOTBALL_KEYPOINTS: list[tuple[int, int]] = [
    (0, 0),  # 1
    (0, 0),  # 2
    (0, 0),  # 3
    (0, 0),  # 4
    (0, 0),  # 5
    (0, 0),  # 6

    (0, 0),  # 7
    (0, 0),  # 8
    (0, 0),  # 9

    (0, 0),  # 10
    (0, 0),  # 11
    (0, 0),  # 12
    (0, 0),  # 13

    (0, 0),  # 14
    (527, 283),  # 15
    (527, 403),  # 16
    (0, 0),  # 17

    (0, 0),  # 18
    (0, 0),  # 19
    (0, 0),  # 20
    (0, 0),  # 21

    (0, 0),  # 22

    (0, 0),  # 23
    (0, 0),  # 24

    (0, 0),  # 25
    (0, 0),  # 26
    (0, 0),  # 27
    (0, 0),  # 28
    (0, 0),  # 29
    (0, 0),  # 30

    (405, 340),  # 31
    (645, 340),  # 32
]

def convert_keypoints_to_val_format(keypoints):
    return [tuple(int(x) for x in pair) for pair in keypoints]

def predict_failed_indices(results_frames: Sequence[Any]) -> list[int]:
    max_frames = len(results_frames)
    if max_frames == 0:
        return []

    failed_indices: list[int] = []
    for frame_index, frame_result in enumerate(results_frames):
        frame_keypoints = getattr(frame_result, "keypoints", []) or []
        non_zero_count = sum(1 for (x, y) in frame_keypoints if int(x) != 0 and int(y) != 0)
        if non_zero_count <= 4:
            failed_indices.append(frame_index)
    return failed_indices

def _generate_sparse_template_keypoints(frame_width: int, frame_height: int) -> list[tuple[int, int]]:
    template_max_x, template_max_y = (1045, 675)
    sx = float(frame_width) / float(template_max_x if template_max_x != 0 else 1)
    sy = float(frame_height) / float(template_max_y if template_max_y != 0 else 1)
    scaled: list[tuple[int, int]] = []
    for i in range(32):
        tx, ty = FOOTBALL_KEYPOINTS[i]
        x_scaled = int(round(tx * sx))
        y_scaled = int(round(ty * sy))
        scaled.append((x_scaled, y_scaled))
    return scaled

def fix_keypoints(
    results_frames: Sequence[Any],
    failed_indices: Sequence[int],
    frame_width: int,
    frame_height: int,
) -> list[Any]:
    max_frames = len(results_frames)
    if max_frames == 0:
        return list(results_frames)

    failed_set = set(int(i) for i in failed_indices)
    all_indices = list(range(max_frames))
    successful_indices = [i for i in all_indices if i not in failed_set]

    if len(successful_indices) == 0:
        sparse_template = _generate_sparse_template_keypoints(frame_width, frame_height)
        for frame_result in results_frames:
            setattr(frame_result, "keypoints", list(convert_keypoints_to_val_format(sparse_template)))
        return list(results_frames)

    seed_index = successful_indices[0]
    seed_kps_raw = getattr(results_frames[seed_index], "keypoints", []) or []
    last_success_kps = convert_keypoints_to_val_format(seed_kps_raw)

    for frame_index in range(max_frames):
        frame_result = results_frames[frame_index]
        if frame_index in failed_set:
            setattr(frame_result, "keypoints", list(last_success_kps))
        else:
            current_kps_raw = getattr(frame_result, "keypoints", []) or []
            current_kps = convert_keypoints_to_val_format(current_kps_raw)
            setattr(frame_result, "keypoints", list(current_kps))
            last_success_kps = current_kps

    return list(results_frames)

def run_keypoints_post_processing(results_frames: Sequence[Any], frame_width: int, frame_height: int) -> list[Any]:
    failed_indices = predict_failed_indices(results_frames)
    return fix_keypoints(results_frames, failed_indices, frame_width, frame_height)

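# Illustrative sketch of the post-processing contract above: frames whose keypoints
# are almost all (0, 0) are treated as failures and inherit the last successful frame's
# keypoints. The result objects here are dummies, not the real model output type.
#
# from types import SimpleNamespace
# from keypoint_helper import run_keypoints_post_processing
#
# good = SimpleNamespace(keypoints=[(100, 50), (200, 60), (300, 70), (400, 80), (500, 90)])
# bad = SimpleNamespace(keypoints=[(0, 0)] * 32)
#
# fixed = run_keypoints_post_processing([good, bad], frame_width=1920, frame_height=1080)
# print(fixed[1].keypoints == fixed[0].keypoints)  # True: the failed frame was back-filled
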
keypoint_helper_v2.py
ADDED
The diff for this file is too large to render.
See raw diff

keypoint_helper_v2_optimized.py
ADDED
The diff for this file is too large to render.
See raw diff

miner.py
ADDED
@@ -0,0 +1,881 @@
from pathlib import Path
from typing import List, Tuple, Dict, Optional
import sys
import os

from numpy import ndarray
from pydantic import BaseModel

sys.path.append(os.path.dirname(os.path.abspath(__file__)))
from keypoint_helper import run_keypoints_post_processing
from keypoint_helper_v2 import run_keypoints_post_processing as run_keypoints_post_processing_v2

from ultralytics import YOLO
from team_cluster import TeamClassifier
from utils import (
    BoundingBox,
    Constants,
)

import time
import torch
import gc
import cv2
import numpy as np
from collections import defaultdict
from pitch import process_batch_input, get_cls_net
from keypoint_evaluation import (
    evaluate_keypoints_for_frame,
    evaluate_keypoints_for_frame_gpu,
    load_template_from_file,
    evaluate_keypoints_for_frame_opencv_cuda,
    evaluate_keypoints_batch_for_frame,
)

import yaml


class BoundingBox(BaseModel):
    x1: int
    y1: int
    x2: int
    y2: int
    cls_id: int
    conf: float


class TVFrameResult(BaseModel):
    frame_id: int
    boxes: List[BoundingBox]
    keypoints: List[Tuple[int, int]]


class Miner:
    SMALL_CONTAINED_IOA = Constants.SMALL_CONTAINED_IOA
    SMALL_RATIO_MAX = Constants.SMALL_RATIO_MAX
    SINGLE_PLAYER_HUE_PIVOT = Constants.SINGLE_PLAYER_HUE_PIVOT
    CORNER_INDICES = Constants.CORNER_INDICES
    KEYPOINTS_CONFIDENCE = Constants.KEYPOINTS_CONFIDENCE
    CORNER_CONFIDENCE = Constants.CORNER_CONFIDENCE
    GOALKEEPER_POSITION_MARGIN = Constants.GOALKEEPER_POSITION_MARGIN
    MIN_SAMPLES_FOR_FIT = 16  # Minimum player crops needed before fitting TeamClassifier
    MAX_SAMPLES_FOR_FIT = 600  # Maximum samples to avoid overfitting

    def __init__(self, path_hf_repo: Path) -> None:
        try:
            device = "cuda" if torch.cuda.is_available() else "cpu"
            model_path = path_hf_repo / "detection.onnx"
            self.bbox_model = YOLO(model_path)

            print(f"BBox Model Loaded: class name {self.bbox_model.names}")

            team_model_path = path_hf_repo / "osnet_model.pth.tar-100"
            self.team_classifier = TeamClassifier(
                device=device,
                batch_size=32,
                model_name=str(team_model_path)
            )
            print("Team Classifier Loaded")

            # Team classification state
            self.team_classifier_fitted = False
            self.player_crops_for_fit = []

            self.keypoints_model_yolo = YOLO(path_hf_repo / "keypoint.pt")

            model_kp_path = path_hf_repo / 'keypoint'
            config_kp_path = path_hf_repo / 'hrnetv2_w48.yaml'
            cfg_kp = yaml.safe_load(open(config_kp_path, 'r'))

            loaded_state_kp = torch.load(model_kp_path, map_location=device)
            model = get_cls_net(cfg_kp)
            model.load_state_dict(loaded_state_kp)
            model.to(device)
            model.eval()

            self.keypoints_model = model
            print("Keypoints Model (keypoint.pt) Loaded")

            template_image_path = path_hf_repo / "football_pitch_template.png"
            self.template_image, self.template_keypoints = load_template_from_file(str(template_image_path))

            self.kp_threshold = 0.1
            self.pitch_batch_size = 4
            self.health = "healthy"

            print("✅ Keypoints Model Loaded")
        except Exception as e:
            self.health = "❌ Miner initialization failed: " + str(e)
            print(self.health)

    def __repr__(self) -> str:
        if self.health == 'healthy':
            return (
                f"health: {self.health}\n"
                f"BBox Model: {type(self.bbox_model).__name__}\n"
                f"Keypoints Model: {type(self.keypoints_model).__name__}"
            )
        else:
            return self.health

    def _calculate_iou(self, box1: Tuple[float, float, float, float],
                       box2: Tuple[float, float, float, float]) -> float:
        """
        Calculate Intersection over Union (IoU) between two bounding boxes.
        Args:
            box1: (x1, y1, x2, y2)
            box2: (x1, y1, x2, y2)
        Returns:
            IoU score (0-1)
        """
        x1_1, y1_1, x2_1, y2_1 = box1
        x1_2, y1_2, x2_2, y2_2 = box2

        # Calculate intersection area
        x_left = max(x1_1, x1_2)
        y_top = max(y1_1, y1_2)
        x_right = min(x2_1, x2_2)
        y_bottom = min(y2_1, y2_2)

        if x_right < x_left or y_bottom < y_top:
            return 0.0

        intersection_area = (x_right - x_left) * (y_bottom - y_top)

        # Calculate union area
        box1_area = (x2_1 - x1_1) * (y2_1 - y1_1)
        box2_area = (x2_2 - x1_2) * (y2_2 - y1_2)
        union_area = box1_area + box2_area - intersection_area

        if union_area == 0:
            return 0.0

        return intersection_area / union_area

    def _extract_jersey_region(self, crop: ndarray) -> ndarray:
        """
        Extract jersey region (upper body) from player crop.
        For close-ups, focuses on upper 60%; for distant shots, uses the full crop.
        """
        if crop is None or crop.size == 0:
            return crop

        h, w = crop.shape[:2]
        if h < 10 or w < 10:
            return crop

        # For close-up shots, extract upper body (jersey region)
        is_closeup = h > 100 or (h * w) > 12000
        if is_closeup:
            # Upper 60% of the crop (jersey area, avoiding shorts)
            jersey_top = 0
            jersey_bottom = int(h * 0.60)
            jersey_left = max(0, int(w * 0.05))
            jersey_right = min(w, int(w * 0.95))
            return crop[jersey_top:jersey_bottom, jersey_left:jersey_right]
        return crop

    def _extract_color_signature(self, crop: ndarray) -> Optional[np.ndarray]:
        """
        Extract color signature from jersey region using HSV and LAB color spaces.
        Returns a feature vector with dominant colors and color statistics.
        """
        if crop is None or crop.size == 0:
            return None

        jersey_region = self._extract_jersey_region(crop)
        if jersey_region.size == 0:
            return None

        try:
            # Convert to HSV and LAB color spaces
            hsv = cv2.cvtColor(jersey_region, cv2.COLOR_BGR2HSV)
            lab = cv2.cvtColor(jersey_region, cv2.COLOR_BGR2LAB)

            # Reshape for processing
            hsv_flat = hsv.reshape(-1, 3).astype(np.float32)
            lab_flat = lab.reshape(-1, 3).astype(np.float32)

            # Compute statistics for HSV
            hsv_mean = np.mean(hsv_flat, axis=0) / 255.0
            hsv_std = np.std(hsv_flat, axis=0) / 255.0

            # Compute statistics for LAB
            lab_mean = np.mean(lab_flat, axis=0) / 255.0
            lab_std = np.std(lab_flat, axis=0) / 255.0

            # Dominant color (most frequent hue)
            hue_hist, _ = np.histogram(hsv_flat[:, 0], bins=36, range=(0, 180))
            dominant_hue = np.argmax(hue_hist) * 5  # Convert to hue value

            # Combine features
            color_features = np.concatenate([
                hsv_mean,
                hsv_std,
                lab_mean[:2],  # L and A channels (B is less informative)
                lab_std[:2],
                [dominant_hue / 180.0]  # Normalized dominant hue
            ])

            return color_features
        except Exception as e:
            print(f"Error extracting color signature: {e}")
            return None

    def _get_spatial_position(self, bbox: Tuple[float, float, float, float],
                              frame_width: int, frame_height: int) -> Tuple[float, float]:
        """
        Get normalized spatial position of player on the pitch.
        Returns (x_normalized, y_normalized) where 0,0 is top-left.
        """
        x1, y1, x2, y2 = bbox
        center_x = (x1 + x2) / 2.0
        center_y = (y1 + y2) / 2.0

        # Normalize to [0, 1]
        x_norm = center_x / frame_width if frame_width > 0 else 0.5
        y_norm = center_y / frame_height if frame_height > 0 else 0.5

        return (x_norm, y_norm)

    def _find_best_match(self, target_box: Tuple[float, float, float, float],
                         predicted_frame_data: Dict[int, Tuple[Tuple, str]],
                         iou_threshold: float) -> Tuple[Optional[str], float]:
        """
        Find best matching box in predicted frame data using IoU.
        """
        best_iou = 0.0
        best_team_id = None

        for idx, (bbox, team_cls_id) in predicted_frame_data.items():
            iou = self._calculate_iou(target_box, bbox)
            if iou > best_iou and iou >= iou_threshold:
                best_iou = iou
                best_team_id = team_cls_id

        return (best_team_id, best_iou)

    def _detect_objects_batch(self, decoded_images: List[ndarray]) -> Dict[int, List[BoundingBox]]:
        batch_size = 16
        detection_results = []
        n_frames = len(decoded_images)
        for frame_number in range(0, n_frames, batch_size):
            batch_images = decoded_images[frame_number: frame_number + batch_size]
            detections = self.bbox_model(batch_images, verbose=False, save=False)
            detection_results.extend(detections)

        return detection_results

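# Worked IoU example matching the helper above (hypothetical boxes): the two boxes
# overlap in a 5x5 region, so IoU = 25 / (100 + 100 - 25) ≈ 0.1429.
#
# box_a = (0.0, 0.0, 10.0, 10.0)
# box_b = (5.0, 5.0, 15.0, 15.0)
#
# inter = max(0.0, min(box_a[2], box_b[2]) - max(box_a[0], box_b[0])) * \
#         max(0.0, min(box_a[3], box_b[3]) - max(box_a[1], box_b[1]))
# union = (10.0 * 10.0) + (10.0 * 10.0) - inter
# print(round(inter / union, 4))  # 0.1429
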
    def _team_classify(self, detection_results, decoded_images, offset):
        self.team_classifier_fitted = False
        start = time.time()
        # Collect player crops from the first frames for fitting
        fit_sample_size = 600
        player_crops_for_fit = []

        for frame_id in range(len(detection_results)):
            detection_box = detection_results[frame_id].boxes.data
            if len(detection_box) < 4:
                continue
            # Collect player boxes for team classification fitting (first batch only)
            if len(player_crops_for_fit) < fit_sample_size:
                frame_image = decoded_images[frame_id]
                for box in detection_box:
                    x1, y1, x2, y2, conf, cls_id = box.tolist()
                    if conf < 0.5:
                        continue
                    mapped_cls_id = str(int(cls_id))
                    # Only collect player crops (cls_id = 2)
                    if mapped_cls_id == '2':
                        crop = frame_image[int(y1):int(y2), int(x1):int(x2)]
                        if crop.size > 0:
                            player_crops_for_fit.append(crop)

            # Fit team classifier once enough samples are collected
            if self.team_classifier and not self.team_classifier_fitted and len(player_crops_for_fit) >= fit_sample_size:
                print(f"Fitting TeamClassifier with {len(player_crops_for_fit)} player crops")
                self.team_classifier.fit(player_crops_for_fit)
                self.team_classifier_fitted = True
                break
        if not self.team_classifier_fitted and len(player_crops_for_fit) >= 16:
            print(f"Fallback: Fitting TeamClassifier with {len(player_crops_for_fit)} player crops")
            self.team_classifier.fit(player_crops_for_fit)
            self.team_classifier_fitted = True
        end = time.time()
        print(f"Fitting Kmeans time: {end - start}")

        # Second pass: predict teams with configurable frame skipping optimization
        start = time.time()

        # Get configuration for frame skipping
        prediction_interval = 1  # predict every `prediction_interval`-th frame (1 = every frame)
        iou_threshold = 0.3

        print(f"Team classification - prediction_interval: {prediction_interval}, iou_threshold: {iou_threshold}")

        # Storage for predicted frame results: {frame_id: {box_idx: (bbox, team_id)}}
        predicted_frame_data = {}

        # Step 1: Predict for frames at prediction_interval only
        frames_to_predict = []
        for frame_id in range(len(detection_results)):
            if frame_id % prediction_interval == 0:
                frames_to_predict.append(frame_id)

        print(f"Predicting teams for {len(frames_to_predict)}/{len(detection_results)} frames "
              f"(saving {100 - (len(frames_to_predict) * 100 // len(detection_results))}% compute)")

        for frame_id in frames_to_predict:
            detection_box = detection_results[frame_id].boxes.data
            frame_image = decoded_images[frame_id]

            # Collect player crops for this frame
            frame_player_crops = []
            frame_player_indices = []
            frame_player_boxes = []

            for idx, box in enumerate(detection_box):
                x1, y1, x2, y2, conf, cls_id = box.tolist()
                if cls_id == 2 and conf < 0.6:
                    continue
                mapped_cls_id = str(int(cls_id))

                # Collect player crops for prediction
                if self.team_classifier and self.team_classifier_fitted and mapped_cls_id == '2':
                    crop = frame_image[int(y1):int(y2), int(x1):int(x2)]
                    if crop.size > 0:
                        frame_player_crops.append(crop)
                        frame_player_indices.append(idx)
                        frame_player_boxes.append((x1, y1, x2, y2))

            # Predict teams for all players in this frame
            if len(frame_player_crops) > 0:
                team_ids = self.team_classifier.predict(frame_player_crops)
                predicted_frame_data[frame_id] = {}
                for idx, bbox, team_id in zip(frame_player_indices, frame_player_boxes, team_ids):
                    # Map team_id (0,1) to cls_id (6,7)
                    team_cls_id = str(6 + int(team_id))
                    predicted_frame_data[frame_id][idx] = (bbox, team_cls_id)

        # Step 2: Process all frames (interpolate skipped frames)
        fallback_count = 0
        interpolated_count = 0
        bboxes: dict[int, list[BoundingBox]] = {}
        for frame_id in range(len(detection_results)):
            detection_box = detection_results[frame_id].boxes.data
            frame_image = decoded_images[frame_id]
            boxes = []

            team_predictions = {}

            if frame_id % prediction_interval == 0:
                # Predicted frame: use pre-computed predictions
                if frame_id in predicted_frame_data:
                    for idx, (bbox, team_cls_id) in predicted_frame_data[frame_id].items():
                        team_predictions[idx] = team_cls_id
            else:
                # Skipped frame: interpolate from neighboring predicted frames
                # Find nearest predicted frames
                prev_predicted_frame = (frame_id // prediction_interval) * prediction_interval
                next_predicted_frame = prev_predicted_frame + prediction_interval

                # Collect current frame player boxes
                for idx, box in enumerate(detection_box):
                    x1, y1, x2, y2, conf, cls_id = box.tolist()
                    if cls_id == 2 and conf < 0.6:
                        continue
                    mapped_cls_id = str(int(cls_id))

                    if self.team_classifier and self.team_classifier_fitted and mapped_cls_id == '2':
                        target_box = (x1, y1, x2, y2)

                        # Try to match with previous predicted frame
                        best_team_id = None
                        best_iou = 0.0

                        if prev_predicted_frame in predicted_frame_data:
                            team_id, iou = self._find_best_match(
                                target_box,
                                predicted_frame_data[prev_predicted_frame],
                                iou_threshold
                            )
                            if team_id is not None:
                                best_team_id = team_id
                                best_iou = iou

                        # Try to match with next predicted frame if available and no good match yet
                        if best_team_id is None and next_predicted_frame < len(detection_results):
                            if next_predicted_frame in predicted_frame_data:
                                team_id, iou = self._find_best_match(
                                    target_box,
                                    predicted_frame_data[next_predicted_frame],
                                    iou_threshold
                                )
                                if team_id is not None and iou > best_iou:
                                    best_team_id = team_id
                                    best_iou = iou

                        # Track interpolation success
                        if best_team_id is not None:
                            interpolated_count += 1
                        else:
                            # Fallback: if no match found, predict individually
                            crop = frame_image[int(y1):int(y2), int(x1):int(x2)]
                            if crop.size > 0:
                                team_id = self.team_classifier.predict([crop])[0]
                                best_team_id = str(6 + int(team_id))
                                fallback_count += 1

                        if best_team_id is not None:
                            team_predictions[idx] = best_team_id

            # Parse boxes with team classification
            for idx, box in enumerate(detection_box):
                x1, y1, x2, y2, conf, cls_id = box.tolist()
                if cls_id == 2 and conf < 0.6:
                    continue

                # Check overlap with staff box
                overlap_staff = False
                for idy, boxy in enumerate(detection_box):
                    s_x1, s_y1, s_x2, s_y2, s_conf, s_cls_id = boxy.tolist()
                    if cls_id == 2 and s_cls_id == 4:
                        staff_iou = self._calculate_iou(box[:4], boxy[:4])
                        if staff_iou >= 0.8:
                            overlap_staff = True
                            break
                if overlap_staff:
|
| 448 |
+
continue
|
| 449 |
+
|
| 450 |
+
mapped_cls_id = str(int(cls_id))
|
| 451 |
+
|
| 452 |
+
# Override cls_id for players with team prediction
|
| 453 |
+
if idx in team_predictions:
|
| 454 |
+
mapped_cls_id = team_predictions[idx]
|
| 455 |
+
if mapped_cls_id != '4':
|
| 456 |
+
if int(mapped_cls_id) == 3 and conf < 0.5:
|
| 457 |
+
continue
|
| 458 |
+
boxes.append(
|
| 459 |
+
BoundingBox(
|
| 460 |
+
x1=int(x1),
|
| 461 |
+
y1=int(y1),
|
| 462 |
+
x2=int(x2),
|
| 463 |
+
y2=int(y2),
|
| 464 |
+
cls_id=int(mapped_cls_id),
|
| 465 |
+
conf=float(conf),
|
| 466 |
+
)
|
| 467 |
+
)
|
| 468 |
+
# Handle footballs - keep only the best one
|
| 469 |
+
footballs = [bb for bb in boxes if int(bb.cls_id) == 0]
|
| 470 |
+
if len(footballs) > 1:
|
| 471 |
+
best_ball = max(footballs, key=lambda b: b.conf)
|
| 472 |
+
boxes = [bb for bb in boxes if int(bb.cls_id) != 0]
|
| 473 |
+
boxes.append(best_ball)
|
| 474 |
+
|
| 475 |
+
bboxes[offset + frame_id] = boxes
|
| 476 |
+
return bboxes
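# Sketch of the frame-skipping schedule used above (assuming prediction_interval = 2;
# values are illustrative): frames 0, 2, 4, ... go through self.team_classifier.predict(),
# while frames 1, 3, 5, ... reuse the neighbouring predictions via IoU matching in
# _find_best_match. A player box that cannot be matched above iou_threshold falls back
# to an individual predict() on its crop, and team ids 0/1 are always reported as cls_id 6/7.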
|
| 477 |
+
|
| 478 |
+
|
| 479 |
+
def predict_batch(self, batch_images: List[ndarray], offset: int, n_keypoints: int) -> List[TVFrameResult]:
|
| 480 |
+
start = time.time()
|
| 481 |
+
detection_results = self._detect_objects_batch(batch_images)
|
| 482 |
+
end = time.time()
|
| 483 |
+
print(f"Detection time: {end - start}")
|
| 484 |
+
|
| 485 |
+
# Use hybrid team classification
|
| 486 |
+
start = time.time()
|
| 487 |
+
bboxes = self._team_classify(detection_results, batch_images, offset)
|
| 488 |
+
end = time.time()
|
| 489 |
+
print(f"Team classify time: {end - start}")
|
| 490 |
+
|
| 491 |
+
# Phase 3: Keypoint Detection
|
| 492 |
+
start = time.time()
|
| 493 |
+
keypoints_yolo: Dict[int, List[Tuple[int, int]]] = {}
|
| 494 |
+
|
| 495 |
+
keypoints_yolo = self._detect_keypoints_batch(batch_images, offset, n_keypoints)
|
| 496 |
+
|
| 497 |
+
|
| 498 |
+
pitch_batch_size = min(self.pitch_batch_size, len(batch_images))
|
| 499 |
+
keypoints: Dict[int, List[Tuple[int, int]]] = {}
|
| 500 |
+
|
| 501 |
+
start = time.time()
|
| 502 |
+
last_score = 0
|
| 503 |
+
last_valid_keypoints = None
|
| 504 |
+
while True:
|
| 505 |
+
gc.collect()
|
| 506 |
+
if torch.cuda.is_available():
|
| 507 |
+
torch.cuda.empty_cache()
|
| 508 |
+
torch.cuda.synchronize()
|
| 509 |
+
device_str = "cuda"
|
| 510 |
+
keypoints_result = process_batch_input(
|
| 511 |
+
batch_images,
|
| 512 |
+
self.keypoints_model,
|
| 513 |
+
self.kp_threshold,
|
| 514 |
+
device_str,
|
| 515 |
+
batch_size=pitch_batch_size,
|
| 516 |
+
)
|
| 517 |
+
if keypoints_result is not None and len(keypoints_result) > 0:
|
| 518 |
+
for frame_number_in_batch, kp_dict in enumerate(keypoints_result):
|
| 519 |
+
if frame_number_in_batch >= len(batch_images):
|
| 520 |
+
break
|
| 521 |
+
frame_keypoints: List[Tuple[int, int]] = []
|
| 522 |
+
try:
|
| 523 |
+
height, width = batch_images[frame_number_in_batch].shape[:2]
|
| 524 |
+
if kp_dict is not None and isinstance(kp_dict, dict):
|
| 525 |
+
for idx in range(32):
|
| 526 |
+
x, y = 0, 0
|
| 527 |
+
kp_idx = idx + 1
|
| 528 |
+
if kp_idx in kp_dict:
|
| 529 |
+
try:
|
| 530 |
+
kp_data = kp_dict[kp_idx]
|
| 531 |
+
if isinstance(kp_data, dict) and "x" in kp_data and "y" in kp_data:
|
| 532 |
+
x = int(kp_data["x"] * width)
|
| 533 |
+
y = int(kp_data["y"] * height)
|
| 534 |
+
except (KeyError, TypeError, ValueError):
|
| 535 |
+
pass
|
| 536 |
+
frame_keypoints.append((x, y))
|
| 537 |
+
except (IndexError, ValueError, AttributeError):
|
| 538 |
+
frame_keypoints = [(0, 0)] * 32
|
| 539 |
+
if len(frame_keypoints) < n_keypoints:
|
| 540 |
+
frame_keypoints.extend([(0, 0)] * (n_keypoints - len(frame_keypoints)))
|
| 541 |
+
else:
|
| 542 |
+
frame_keypoints = frame_keypoints[:n_keypoints]
|
| 543 |
+
|
| 544 |
+
time1 = time.time()
|
| 545 |
+
frame_keypoints_yolo = keypoints_yolo.get(offset + frame_number_in_batch, frame_keypoints)
|
| 546 |
+
|
| 547 |
+
valid_keypoints_count = 0
|
| 548 |
+
valid_keypoints_yolo_count = 0
|
| 549 |
+
for kp in frame_keypoints:
|
| 550 |
+
if kp[0] != 0.0 or kp[1] != 0.0:
|
| 551 |
+
valid_keypoints_count += 1
|
| 552 |
+
if valid_keypoints_count > 3:
|
| 553 |
+
break
|
| 554 |
+
|
| 555 |
+
for kp in frame_keypoints_yolo:
|
| 556 |
+
if kp[0] != 0.0 or kp[1] != 0.0:
|
| 557 |
+
valid_keypoints_yolo_count += 1
|
| 558 |
+
if valid_keypoints_yolo_count > 3:
|
| 559 |
+
break
|
| 560 |
+
|
| 561 |
+
# Evaluate and select best keypoints (using batch evaluation for speed)
|
| 562 |
+
if valid_keypoints_count > 3 and valid_keypoints_yolo_count > 3:
|
| 563 |
+
try:
|
| 564 |
+
last_valid_keypoints = keypoints.get(offset + frame_number_in_batch - 1, frame_keypoints)
|
| 565 |
+
# Evaluate both keypoint sets in batch (much faster!)
|
| 566 |
+
scores = evaluate_keypoints_batch_for_frame(
|
| 567 |
+
template_keypoints=self.template_keypoints,
|
| 568 |
+
frame_keypoints_list=[frame_keypoints, frame_keypoints_yolo, last_valid_keypoints],
|
| 569 |
+
frame=batch_images[frame_number_in_batch],
|
| 570 |
+
floor_markings_template=self.template_image,
|
| 571 |
+
device="cuda"
|
| 572 |
+
)
|
| 573 |
+
score = scores[0]
|
| 574 |
+
score_yolo = scores[1]
|
| 575 |
+
last_score = scores[2]
|
| 576 |
+
|
| 577 |
+
if last_score > score and last_score > score_yolo:
|
| 578 |
+
frame_keypoints = last_valid_keypoints
|
| 579 |
+
elif score_yolo > score:
|
| 580 |
+
frame_keypoints = frame_keypoints_yolo
|
| 581 |
+
last_score = score_yolo
|
| 582 |
+
else:
|
| 583 |
+
last_score = score
|
| 584 |
+
|
| 585 |
+
last_valid_keypoints = frame_keypoints
|
| 586 |
+
|
| 587 |
+
except Exception as e:
|
| 588 |
+
# Fallback: use YOLO if available, otherwise use pitch model
|
| 589 |
+
if valid_keypoints_yolo_count > 3:
|
| 590 |
+
frame_keypoints = frame_keypoints_yolo
|
| 591 |
+
else:
|
| 595 |
+
if last_valid_keypoints is not None:
|
| 596 |
+
frame_keypoints = last_valid_keypoints
|
| 597 |
+
|
| 598 |
+
time2 = time.time()
|
| 599 |
+
print(f"Keypoint evaluation time: {time2 - time1}")
|
| 600 |
+
|
| 601 |
+
keypoints[offset + frame_number_in_batch] = frame_keypoints
|
| 602 |
+
break
|
| 603 |
+
end = time.time()
|
| 604 |
+
print(f"Keypoint time: {end - start}")
|
| 605 |
+
|
| 606 |
+
results: List[TVFrameResult] = []
|
| 607 |
+
for frame_number in range(offset, offset + len(batch_images)):
|
| 608 |
+
frame_boxes = bboxes.get(frame_number, [])
|
| 609 |
+
result = TVFrameResult(
|
| 610 |
+
frame_id=frame_number,
|
| 611 |
+
boxes=frame_boxes,
|
| 612 |
+
keypoints=keypoints.get(
|
| 613 |
+
frame_number,
|
| 614 |
+
[(0, 0) for _ in range(n_keypoints)],
|
| 615 |
+
),
|
| 616 |
+
)
|
| 617 |
+
results.append(result)
|
| 618 |
+
|
| 619 |
+
start = time.time()
|
| 620 |
+
if len(batch_images) > 0:
|
| 621 |
+
h, w = batch_images[0].shape[:2]
|
| 622 |
+
results = run_keypoints_post_processing_v2(
|
| 623 |
+
results, w, h,
|
| 624 |
+
frames=batch_images,
|
| 625 |
+
template_keypoints=self.template_keypoints,
|
| 626 |
+
floor_markings_template=self.template_image,
|
| 627 |
+
offset=offset
|
| 628 |
+
)
|
| 629 |
+
end = time.time()
|
| 630 |
+
print(f"Keypoint post processing time: {end - start}")
|
| 631 |
+
|
| 632 |
+
gc.collect()
|
| 633 |
+
if torch.cuda.is_available():
|
| 634 |
+
torch.cuda.empty_cache()
|
| 635 |
+
torch.cuda.synchronize()
|
| 636 |
+
|
| 637 |
+
return results
|
| 638 |
+
|
| 639 |
+
def _detect_keypoints_batch(self, batch_images: List[ndarray],
|
| 640 |
+
offset: int, n_keypoints: int) -> Dict[int, List[Tuple[int, int]]]:
|
| 641 |
+
"""
|
| 642 |
+
Phase 3: Keypoint detection for all frames in batch.
|
| 643 |
+
|
| 644 |
+
Args:
|
| 645 |
+
batch_images: List of images to process
|
| 646 |
+
offset: Frame offset for numbering
|
| 647 |
+
n_keypoints: Number of keypoints expected
|
| 648 |
+
|
| 649 |
+
Returns:
|
| 650 |
+
Dictionary mapping frame_id to list of keypoint coordinates
|
| 651 |
+
"""
|
| 652 |
+
keypoints: Dict[int, List[Tuple[int, int]]] = {}
|
| 653 |
+
keypoints_model_results = self.keypoints_model_yolo.predict(batch_images)
|
| 654 |
+
|
| 655 |
+
if keypoints_model_results is None:
|
| 656 |
+
return keypoints
|
| 657 |
+
|
| 658 |
+
for frame_idx_in_batch, detection in enumerate(keypoints_model_results):
|
| 659 |
+
if not hasattr(detection, "keypoints") or detection.keypoints is None:
|
| 660 |
+
continue
|
| 661 |
+
|
| 662 |
+
# Extract keypoints with confidence
|
| 663 |
+
frame_keypoints_with_conf: List[Tuple[int, int, float]] = []
|
| 664 |
+
for i, part_points in enumerate(detection.keypoints.data):
|
| 665 |
+
for k_id, (x, y, _) in enumerate(part_points):
|
| 666 |
+
confidence = float(detection.keypoints.conf[i][k_id])
|
| 667 |
+
frame_keypoints_with_conf.append((int(x), int(y), confidence))
|
| 668 |
+
|
| 669 |
+
# Pad or truncate to expected number of keypoints
|
| 670 |
+
if len(frame_keypoints_with_conf) < n_keypoints:
|
| 671 |
+
frame_keypoints_with_conf.extend(
|
| 672 |
+
[(0, 0, 0.0)] * (n_keypoints - len(frame_keypoints_with_conf))
|
| 673 |
+
)
|
| 674 |
+
else:
|
| 675 |
+
frame_keypoints_with_conf = frame_keypoints_with_conf[:n_keypoints]
|
| 676 |
+
|
| 677 |
+
# Filter keypoints based on confidence thresholds
|
| 678 |
+
filtered_keypoints: List[Tuple[int, int]] = []
|
| 679 |
+
for idx, (x, y, confidence) in enumerate(frame_keypoints_with_conf):
|
| 680 |
+
if idx in self.CORNER_INDICES:
|
| 681 |
+
# Corner keypoints have lower confidence threshold
|
| 682 |
+
if confidence < 0.3:
|
| 683 |
+
filtered_keypoints.append((0, 0))
|
| 684 |
+
else:
|
| 685 |
+
filtered_keypoints.append((int(x), int(y)))
|
| 686 |
+
else:
|
| 687 |
+
# Regular keypoints
|
| 688 |
+
if confidence < 0.5:
|
| 689 |
+
filtered_keypoints.append((0, 0))
|
| 690 |
+
else:
|
| 691 |
+
filtered_keypoints.append((int(x), int(y)))
|
| 692 |
+
|
| 693 |
+
frame_id = offset + frame_idx_in_batch
|
| 694 |
+
keypoints[frame_id] = filtered_keypoints
|
| 695 |
+
|
| 696 |
+
return keypoints
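# Filtering rule sketch: a keypoint whose index is in CORNER_INDICES is kept when its
# confidence is >= 0.3, any other keypoint when its confidence is >= 0.5; everything
# below its threshold is zeroed to (0, 0), which downstream code treats as "missing".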
|
| 697 |
+
|
| 698 |
+
def predict_keypoints(
|
| 699 |
+
self,
|
| 700 |
+
images: List[ndarray],
|
| 701 |
+
n_keypoints: int = 32,
|
| 702 |
+
batch_size: Optional[int] = None,
|
| 703 |
+
conf_threshold: float = 0.5,
|
| 704 |
+
corner_conf_threshold: float = 0.3,
|
| 705 |
+
verbose: bool = False
|
| 706 |
+
) -> Dict[int, List[Tuple[int, int]]]:
|
| 707 |
+
"""
|
| 708 |
+
Standalone function for keypoint detection on a list of images.
|
| 709 |
+
Optimized for maximum prediction speed.
|
| 710 |
+
|
| 711 |
+
Args:
|
| 712 |
+
images: List of images (numpy arrays) to process
|
| 713 |
+
n_keypoints: Number of keypoints expected per frame (default: 32)
|
| 714 |
+
batch_size: Batch size for YOLO prediction (None = auto, uses all images)
|
| 715 |
+
conf_threshold: Confidence threshold for regular keypoints (default: 0.5)
|
| 716 |
+
corner_conf_threshold: Confidence threshold for corner keypoints (default: 0.3)
|
| 717 |
+
verbose: Whether to print progress information
|
| 718 |
+
|
| 719 |
+
Returns:
|
| 720 |
+
Dictionary mapping frame index to list of keypoint coordinates (x, y)
|
| 721 |
+
Frame indices start from 0
|
| 722 |
+
"""
|
| 723 |
+
if not images:
|
| 724 |
+
return {}
|
| 725 |
+
|
| 726 |
+
keypoints: Dict[int, List[Tuple[int, int]]] = {}
|
| 727 |
+
|
| 728 |
+
# Use provided batch_size or process all at once for maximum speed
|
| 729 |
+
if batch_size is None:
|
| 730 |
+
batch_size = len(images)
|
| 731 |
+
|
| 732 |
+
# Process in batches for optimal GPU utilization
|
| 733 |
+
for batch_start in range(0, len(images), batch_size):
|
| 734 |
+
batch_end = min(batch_start + batch_size, len(images))
|
| 735 |
+
batch_images = images[batch_start:batch_end]
|
| 736 |
+
|
| 737 |
+
if verbose:
|
| 738 |
+
print(f"Processing keypoints batch {batch_start}-{batch_end-1} ({len(batch_images)} images)")
|
| 739 |
+
|
| 740 |
+
# YOLO keypoint prediction (optimized batch processing)
|
| 741 |
+
keypoints_model_results = self.keypoints_model_yolo.predict(
|
| 742 |
+
batch_images,
|
| 743 |
+
verbose=False,
|
| 744 |
+
save=False,
|
| 745 |
+
conf=0.1, # Lower conf for detection, we filter later
|
| 746 |
+
)
|
| 747 |
+
|
| 748 |
+
if keypoints_model_results is None:
|
| 749 |
+
# Fill with empty keypoints for this batch
|
| 750 |
+
for frame_idx in range(batch_start, batch_end):
|
| 751 |
+
keypoints[frame_idx] = [(0, 0)] * n_keypoints
|
| 752 |
+
continue
|
| 753 |
+
|
| 754 |
+
# Process each frame in the batch
|
| 755 |
+
for batch_idx, detection in enumerate(keypoints_model_results):
|
| 756 |
+
frame_idx = batch_start + batch_idx
|
| 757 |
+
|
| 758 |
+
if not hasattr(detection, "keypoints") or detection.keypoints is None:
|
| 759 |
+
keypoints[frame_idx] = [(0, 0)] * n_keypoints
|
| 760 |
+
continue
|
| 761 |
+
|
| 762 |
+
# Extract keypoints with confidence
|
| 763 |
+
frame_keypoints_with_conf: List[Tuple[int, int, float]] = []
|
| 764 |
+
try:
|
| 765 |
+
for i, part_points in enumerate(detection.keypoints.data):
|
| 766 |
+
for k_id, (x, y, _) in enumerate(part_points):
|
| 767 |
+
confidence = float(detection.keypoints.conf[i][k_id])
|
| 768 |
+
frame_keypoints_with_conf.append((int(x), int(y), confidence))
|
| 769 |
+
except (AttributeError, IndexError, TypeError):
|
| 770 |
+
keypoints[frame_idx] = [(0, 0)] * n_keypoints
|
| 771 |
+
continue
|
| 772 |
+
|
| 773 |
+
# Pad or truncate to expected number of keypoints
|
| 774 |
+
if len(frame_keypoints_with_conf) < n_keypoints:
|
| 775 |
+
frame_keypoints_with_conf.extend(
|
| 776 |
+
[(0, 0, 0.0)] * (n_keypoints - len(frame_keypoints_with_conf))
|
| 777 |
+
)
|
| 778 |
+
else:
|
| 779 |
+
frame_keypoints_with_conf = frame_keypoints_with_conf[:n_keypoints]
|
| 780 |
+
|
| 781 |
+
# Filter keypoints based on confidence thresholds
|
| 782 |
+
filtered_keypoints: List[Tuple[int, int]] = []
|
| 783 |
+
for idx, (x, y, confidence) in enumerate(frame_keypoints_with_conf):
|
| 784 |
+
if idx in self.CORNER_INDICES:
|
| 785 |
+
# Corner keypoints have lower confidence threshold
|
| 786 |
+
if confidence < corner_conf_threshold:
|
| 787 |
+
filtered_keypoints.append((0, 0))
|
| 788 |
+
else:
|
| 789 |
+
filtered_keypoints.append((int(x), int(y)))
|
| 790 |
+
else:
|
| 791 |
+
# Regular keypoints
|
| 792 |
+
if confidence < conf_threshold:
|
| 793 |
+
filtered_keypoints.append((0, 0))
|
| 794 |
+
else:
|
| 795 |
+
filtered_keypoints.append((int(x), int(y)))
|
| 796 |
+
|
| 797 |
+
keypoints[frame_idx] = filtered_keypoints
|
| 798 |
+
|
| 799 |
+
return keypoints
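# Minimal usage sketch (assumes an already initialized miner instance and a list of
# BGR frames; variable names are illustrative, not part of this diff):
# kps = miner.predict_keypoints(frames, n_keypoints=32, batch_size=8)
# kps[0] is a list of 32 (x, y) tuples; (0, 0) marks a keypoint filtered out by confidence.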
|
| 800 |
+
|
| 801 |
+
def predict_objects(
|
| 802 |
+
self,
|
| 803 |
+
images: List[ndarray],
|
| 804 |
+
batch_size: Optional[int] = 16,
|
| 805 |
+
conf_threshold: float = 0.5,
|
| 806 |
+
iou_threshold: float = 0.45,
|
| 807 |
+
classes: Optional[List[int]] = None,
|
| 808 |
+
verbose: bool = False,
|
| 809 |
+
) -> Dict[int, List[BoundingBox]]:
|
| 810 |
+
"""
|
| 811 |
+
Standalone high-throughput object detection function.
|
| 812 |
+
Runs the YOLO detector directly on raw images while skipping
|
| 813 |
+
any team-classification or keypoint stages for maximum FPS.
|
| 814 |
+
|
| 815 |
+
Args:
|
| 816 |
+
images: List of frames (BGR numpy arrays).
|
| 817 |
+
batch_size: Number of frames per inference pass. Use None to process
|
| 818 |
+
all frames at once (fastest but highest memory usage).
|
| 819 |
+
conf_threshold: Detection confidence threshold.
|
| 820 |
+
iou_threshold: IoU threshold for NMS within YOLO.
|
| 821 |
+
classes: Optional list of class IDs to keep (None = all classes).
|
| 822 |
+
verbose: Whether to print per-batch progress from YOLO.
|
| 823 |
+
|
| 824 |
+
Returns:
|
| 825 |
+
Dict mapping frame index -> list of BoundingBox predictions.
|
| 826 |
+
"""
|
| 827 |
+
if not images:
|
| 828 |
+
return {}
|
| 829 |
+
|
| 830 |
+
detections: Dict[int, List[BoundingBox]] = {}
|
| 831 |
+
effective_batch = len(images) if batch_size is None else max(1, batch_size)
|
| 832 |
+
|
| 833 |
+
for batch_start in range(0, len(images), effective_batch):
|
| 834 |
+
batch_end = min(batch_start + effective_batch, len(images))
|
| 835 |
+
batch_images = images[batch_start:batch_end]
|
| 836 |
+
|
| 837 |
+
start = time.time()
|
| 838 |
+
yolo_results = self.bbox_model(
|
| 839 |
+
batch_images,
|
| 840 |
+
conf=conf_threshold,
|
| 841 |
+
iou=iou_threshold,
|
| 842 |
+
classes=classes,
|
| 843 |
+
verbose=verbose,
|
| 844 |
+
save=False,
|
| 845 |
+
)
|
| 846 |
+
end = time.time()
|
| 847 |
+
print(f"YOLO time: {end - start}")
|
| 848 |
+
|
| 849 |
+
for local_idx, result in enumerate(yolo_results):
|
| 850 |
+
frame_idx = batch_start + local_idx
|
| 851 |
+
frame_boxes: List[BoundingBox] = []
|
| 852 |
+
|
| 853 |
+
if not hasattr(result, "boxes") or result.boxes is None:
|
| 854 |
+
detections[frame_idx] = frame_boxes
|
| 855 |
+
continue
|
| 856 |
+
|
| 857 |
+
boxes_tensor = result.boxes.data
|
| 858 |
+
if boxes_tensor is None:
|
| 859 |
+
detections[frame_idx] = frame_boxes
|
| 860 |
+
continue
|
| 861 |
+
|
| 862 |
+
for box in boxes_tensor:
|
| 863 |
+
try:
|
| 864 |
+
x1, y1, x2, y2, conf, cls_id = box.tolist()
|
| 865 |
+
frame_boxes.append(
|
| 866 |
+
BoundingBox(
|
| 867 |
+
x1=int(x1),
|
| 868 |
+
y1=int(y1),
|
| 869 |
+
x2=int(x2),
|
| 870 |
+
y2=int(y2),
|
| 871 |
+
cls_id=int(cls_id),
|
| 872 |
+
conf=float(conf),
|
| 873 |
+
)
|
| 874 |
+
)
|
| 875 |
+
except (ValueError, TypeError):
|
| 876 |
+
continue
|
| 877 |
+
|
| 878 |
+
detections[frame_idx] = frame_boxes
|
| 879 |
+
|
| 880 |
+
return detections
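# Minimal usage sketch (illustrative values only):
# dets = miner.predict_objects(frames, batch_size=16, conf_threshold=0.5)
# for frame_idx, boxes in dets.items():
#     for bb in boxes:
#         print(frame_idx, bb.cls_id, bb.conf, (bb.x1, bb.y1, bb.x2, bb.y2))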
|
| 881 |
+
|
miner1.py
ADDED
|
@@ -0,0 +1,685 @@
|
|
| 1 |
+
from pathlib import Path
|
| 2 |
+
from typing import List, Tuple, Dict, Optional
|
| 3 |
+
import sys
|
| 4 |
+
import os
|
| 5 |
+
|
| 6 |
+
from numpy import ndarray
|
| 7 |
+
from pydantic import BaseModel
|
| 8 |
+
|
| 9 |
+
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
|
| 10 |
+
from keypoint_helper import run_keypoints_post_processing
|
| 11 |
+
from keypoint_helper_v2 import run_keypoints_post_processing as run_keypoints_post_processing_v2
|
| 12 |
+
|
| 13 |
+
from ultralytics import YOLO
|
| 14 |
+
from team_cluster import TeamClassifier
|
| 15 |
+
from utils import (
|
| 16 |
+
BoundingBox,
|
| 17 |
+
Constants,
|
| 18 |
+
)
|
| 19 |
+
|
| 20 |
+
import time
|
| 21 |
+
import torch
|
| 22 |
+
import gc
|
| 23 |
+
import cv2
|
| 24 |
+
import numpy as np
|
| 25 |
+
from collections import defaultdict
|
| 26 |
+
from pitch import process_batch_input, get_cls_net
|
| 27 |
+
from keypoint_evaluation import (
|
| 28 |
+
evaluate_keypoints_for_frame,
|
| 29 |
+
evaluate_keypoints_for_frame_gpu,
|
| 30 |
+
load_template_from_file,
|
| 31 |
+
evaluate_keypoints_for_frame_opencv_cuda,
|
| 32 |
+
evaluate_keypoints_batch_for_frame,
|
| 33 |
+
)
|
| 34 |
+
|
| 35 |
+
import yaml
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
class BoundingBox(BaseModel):
|
| 39 |
+
x1: int
|
| 40 |
+
y1: int
|
| 41 |
+
x2: int
|
| 42 |
+
y2: int
|
| 43 |
+
cls_id: int
|
| 44 |
+
conf: float
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
class TVFrameResult(BaseModel):
|
| 48 |
+
frame_id: int
|
| 49 |
+
boxes: List[BoundingBox]
|
| 50 |
+
keypoints: List[Tuple[int, int]]
|
| 51 |
+
|
| 52 |
+
|
| 53 |
+
class Miner:
|
| 54 |
+
SMALL_CONTAINED_IOA = Constants.SMALL_CONTAINED_IOA
|
| 55 |
+
SMALL_RATIO_MAX = Constants.SMALL_RATIO_MAX
|
| 56 |
+
SINGLE_PLAYER_HUE_PIVOT = Constants.SINGLE_PLAYER_HUE_PIVOT
|
| 57 |
+
CORNER_INDICES = Constants.CORNER_INDICES
|
| 58 |
+
KEYPOINTS_CONFIDENCE = Constants.KEYPOINTS_CONFIDENCE
|
| 59 |
+
CORNER_CONFIDENCE = Constants.CORNER_CONFIDENCE
|
| 60 |
+
GOALKEEPER_POSITION_MARGIN = Constants.GOALKEEPER_POSITION_MARGIN
|
| 61 |
+
MIN_SAMPLES_FOR_FIT = 16 # Minimum player crops needed before fitting TeamClassifier
|
| 62 |
+
MAX_SAMPLES_FOR_FIT = 600 # Maximum samples to avoid overfitting
|
| 63 |
+
|
| 64 |
+
def __init__(self, path_hf_repo: Path) -> None:
|
| 65 |
+
try:
|
| 66 |
+
device = "cuda" if torch.cuda.is_available() else "cpu"
|
| 67 |
+
model_path = path_hf_repo / "detection.onnx"
|
| 68 |
+
self.bbox_model = YOLO(model_path)
|
| 69 |
+
|
| 70 |
+
print(f"BBox Model Loaded: class name {self.bbox_model.names}")
|
| 71 |
+
|
| 72 |
+
team_model_path = path_hf_repo / "osnet_model.pth.tar-100"
|
| 73 |
+
self.team_classifier = TeamClassifier(
|
| 74 |
+
device=device,
|
| 75 |
+
batch_size=32,
|
| 76 |
+
model_name=str(team_model_path)
|
| 77 |
+
)
|
| 78 |
+
print("Team Classifier Loaded")
|
| 79 |
+
|
| 80 |
+
# Team classification state
|
| 81 |
+
self.team_classifier_fitted = False
|
| 82 |
+
self.player_crops_for_fit = []
|
| 83 |
+
|
| 84 |
+
self.keypoints_model_yolo = YOLO(path_hf_repo / "keypoint.pt")
|
| 85 |
+
|
| 86 |
+
model_kp_path = path_hf_repo / 'keypoint'
|
| 87 |
+
config_kp_path = path_hf_repo / 'hrnetv2_w48.yaml'
|
| 88 |
+
cfg_kp = yaml.safe_load(open(config_kp_path, 'r'))
|
| 89 |
+
|
| 90 |
+
loaded_state_kp = torch.load(model_kp_path, map_location=device)
|
| 91 |
+
model = get_cls_net(cfg_kp)
|
| 92 |
+
model.load_state_dict(loaded_state_kp)
|
| 93 |
+
model.to(device)
|
| 94 |
+
model.eval()
|
| 95 |
+
|
| 96 |
+
self.keypoints_model = model
|
| 97 |
+
print("Keypoints Model (keypoint.pt) Loaded")
|
| 98 |
+
|
| 99 |
+
template_image_path = path_hf_repo / "football_pitch_template.png"
|
| 100 |
+
self.template_image, self.template_keypoints = load_template_from_file(str(template_image_path))
|
| 101 |
+
|
| 102 |
+
self.kp_threshold = 0.1
|
| 103 |
+
self.pitch_batch_size = 4
|
| 104 |
+
self.health = "healthy"
|
| 105 |
+
|
| 106 |
+
print("✅ Keypoints Model Loaded")
|
| 107 |
+
except Exception as e:
|
| 108 |
+
self.health = "❌ Miner initialization failed: " + str(e)
|
| 109 |
+
print(self.health)
|
| 110 |
+
|
| 111 |
+
def __repr__(self) -> str:
|
| 112 |
+
if self.health == 'healthy':
|
| 113 |
+
return (
|
| 114 |
+
f"health: {self.health}\n"
|
| 115 |
+
f"BBox Model: {type(self.bbox_model).__name__}\n"
|
| 116 |
+
f"Keypoints Model: {type(self.keypoints_model).__name__}"
|
| 117 |
+
)
|
| 118 |
+
else:
|
| 119 |
+
return self.health
|
| 120 |
+
|
| 121 |
+
def _calculate_iou(self, box1: Tuple[float, float, float, float],
|
| 122 |
+
box2: Tuple[float, float, float, float]) -> float:
|
| 123 |
+
"""
|
| 124 |
+
Calculate Intersection over Union (IoU) between two bounding boxes.
|
| 125 |
+
Args:
|
| 126 |
+
box1: (x1, y1, x2, y2)
|
| 127 |
+
box2: (x1, y1, x2, y2)
|
| 128 |
+
Returns:
|
| 129 |
+
IoU score (0-1)
|
| 130 |
+
"""
|
| 131 |
+
x1_1, y1_1, x2_1, y2_1 = box1
|
| 132 |
+
x1_2, y1_2, x2_2, y2_2 = box2
|
| 133 |
+
|
| 134 |
+
# Calculate intersection area
|
| 135 |
+
x_left = max(x1_1, x1_2)
|
| 136 |
+
y_top = max(y1_1, y1_2)
|
| 137 |
+
x_right = min(x2_1, x2_2)
|
| 138 |
+
y_bottom = min(y2_1, y2_2)
|
| 139 |
+
|
| 140 |
+
if x_right < x_left or y_bottom < y_top:
|
| 141 |
+
return 0.0
|
| 142 |
+
|
| 143 |
+
intersection_area = (x_right - x_left) * (y_bottom - y_top)
|
| 144 |
+
|
| 145 |
+
# Calculate union area
|
| 146 |
+
box1_area = (x2_1 - x1_1) * (y2_1 - y1_1)
|
| 147 |
+
box2_area = (x2_2 - x1_2) * (y2_2 - y1_2)
|
| 148 |
+
union_area = box1_area + box2_area - intersection_area
|
| 149 |
+
|
| 150 |
+
if union_area == 0:
|
| 151 |
+
return 0.0
|
| 152 |
+
|
| 153 |
+
return intersection_area / union_area
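# Worked example (sketch): for box1 = (0, 0, 10, 10) and box2 = (5, 5, 15, 15) the
# intersection is 5 * 5 = 25 and each box area is 100, so
# IoU = 25 / (100 + 100 - 25) = 25 / 175 ≈ 0.143.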
|
| 154 |
+
|
| 155 |
+
def _extract_jersey_region(self, crop: ndarray) -> ndarray:
|
| 156 |
+
"""
|
| 157 |
+
Extract jersey region (upper body) from player crop.
|
| 158 |
+
For close-ups, focuses on upper 60%, for distant shots uses full crop.
|
| 159 |
+
"""
|
| 160 |
+
if crop is None or crop.size == 0:
|
| 161 |
+
return crop
|
| 162 |
+
|
| 163 |
+
h, w = crop.shape[:2]
|
| 164 |
+
if h < 10 or w < 10:
|
| 165 |
+
return crop
|
| 166 |
+
|
| 167 |
+
# For close-up shots, extract upper body (jersey region)
|
| 168 |
+
is_closeup = h > 100 or (h * w) > 12000
|
| 169 |
+
if is_closeup:
|
| 170 |
+
# Upper 60% of the crop (jersey area, avoiding shorts)
|
| 171 |
+
jersey_top = 0
|
| 172 |
+
jersey_bottom = int(h * 0.60)
|
| 173 |
+
jersey_left = max(0, int(w * 0.05))
|
| 174 |
+
jersey_right = min(w, int(w * 0.95))
|
| 175 |
+
return crop[jersey_top:jersey_bottom, jersey_left:jersey_right]
|
| 176 |
+
return crop
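# Worked example (sketch, illustrative numbers): a 160x60 crop has h = 160 > 100, so it
# is treated as a close-up and the jersey region becomes rows 0..96 (upper 60%) and
# columns 3..57 (trimming 5% from each side); a 40x20 crop is returned unchanged.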
|
| 177 |
+
|
| 178 |
+
def _extract_color_signature(self, crop: ndarray) -> Optional[np.ndarray]:
|
| 179 |
+
"""
|
| 180 |
+
Extract color signature from jersey region using HSV and LAB color spaces.
|
| 181 |
+
Returns a feature vector with dominant colors and color statistics.
|
| 182 |
+
"""
|
| 183 |
+
if crop is None or crop.size == 0:
|
| 184 |
+
return None
|
| 185 |
+
|
| 186 |
+
jersey_region = self._extract_jersey_region(crop)
|
| 187 |
+
if jersey_region.size == 0:
|
| 188 |
+
return None
|
| 189 |
+
|
| 190 |
+
try:
|
| 191 |
+
# Convert to HSV and LAB color spaces
|
| 192 |
+
hsv = cv2.cvtColor(jersey_region, cv2.COLOR_BGR2HSV)
|
| 193 |
+
lab = cv2.cvtColor(jersey_region, cv2.COLOR_BGR2LAB)
|
| 194 |
+
|
| 195 |
+
# Reshape for processing
|
| 196 |
+
hsv_flat = hsv.reshape(-1, 3).astype(np.float32)
|
| 197 |
+
lab_flat = lab.reshape(-1, 3).astype(np.float32)
|
| 198 |
+
|
| 199 |
+
# Compute statistics for HSV
|
| 200 |
+
hsv_mean = np.mean(hsv_flat, axis=0) / 255.0
|
| 201 |
+
hsv_std = np.std(hsv_flat, axis=0) / 255.0
|
| 202 |
+
|
| 203 |
+
# Compute statistics for LAB
|
| 204 |
+
lab_mean = np.mean(lab_flat, axis=0) / 255.0
|
| 205 |
+
lab_std = np.std(lab_flat, axis=0) / 255.0
|
| 206 |
+
|
| 207 |
+
# Dominant color (most frequent hue)
|
| 208 |
+
hue_hist, _ = np.histogram(hsv_flat[:, 0], bins=36, range=(0, 180))
|
| 209 |
+
dominant_hue = np.argmax(hue_hist) * 5 # Convert to hue value
|
| 210 |
+
|
| 211 |
+
# Combine features
|
| 212 |
+
color_features = np.concatenate([
|
| 213 |
+
hsv_mean,
|
| 214 |
+
hsv_std,
|
| 215 |
+
lab_mean[:2], # L and A channels (B is less informative)
|
| 216 |
+
lab_std[:2],
|
| 217 |
+
[dominant_hue / 180.0] # Normalized dominant hue
|
| 218 |
+
])
|
| 219 |
+
|
| 220 |
+
return color_features
|
| 221 |
+
except Exception as e:
|
| 222 |
+
print(f"Error extracting color signature: {e}")
|
| 223 |
+
return None
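# Resulting feature vector (sketch): 3 HSV means + 3 HSV stds + 2 LAB means (L, A) +
# 2 LAB stds + 1 normalized dominant hue = 11 values per crop, all roughly in [0, 1].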
|
| 224 |
+
|
| 225 |
+
def _get_spatial_position(self, bbox: Tuple[float, float, float, float],
|
| 226 |
+
frame_width: int, frame_height: int) -> Tuple[float, float]:
|
| 227 |
+
"""
|
| 228 |
+
Get normalized spatial position of player on the pitch.
|
| 229 |
+
Returns (x_normalized, y_normalized) where 0,0 is top-left.
|
| 230 |
+
"""
|
| 231 |
+
x1, y1, x2, y2 = bbox
|
| 232 |
+
center_x = (x1 + x2) / 2.0
|
| 233 |
+
center_y = (y1 + y2) / 2.0
|
| 234 |
+
|
| 235 |
+
# Normalize to [0, 1]
|
| 236 |
+
x_norm = center_x / frame_width if frame_width > 0 else 0.5
|
| 237 |
+
y_norm = center_y / frame_height if frame_height > 0 else 0.5
|
| 238 |
+
|
| 239 |
+
return (x_norm, y_norm)
|
| 240 |
+
|
| 241 |
+
def _find_best_match(self, target_box: Tuple[float, float, float, float],
|
| 242 |
+
predicted_frame_data: Dict[int, Tuple[Tuple, str]],
|
| 243 |
+
iou_threshold: float) -> Tuple[Optional[str], float]:
|
| 244 |
+
"""
|
| 245 |
+
Find best matching box in predicted frame data using IoU.
|
| 246 |
+
"""
|
| 247 |
+
best_iou = 0.0
|
| 248 |
+
best_team_id = None
|
| 249 |
+
|
| 250 |
+
for idx, (bbox, team_cls_id) in predicted_frame_data.items():
|
| 251 |
+
iou = self._calculate_iou(target_box, bbox)
|
| 252 |
+
if iou > best_iou and iou >= iou_threshold:
|
| 253 |
+
best_iou = iou
|
| 254 |
+
best_team_id = team_cls_id
|
| 255 |
+
|
| 256 |
+
return (best_team_id, best_iou)
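# Usage sketch (hypothetical numbers): given the previous predicted frame's data, e.g.
# {0: ((100, 50, 140, 150), '6'), 1: ((300, 60, 340, 160), '7')},
# _find_best_match((102, 52, 141, 149), that_dict, iou_threshold=0.3) returns
# ('6', iou) because the first stored box overlaps the target well above 0.3;
# it returns (None, 0.0) when no stored box clears the threshold.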
|
| 257 |
+
|
| 258 |
+
def _detect_objects_batch(self, decoded_images: List[ndarray]) -> Dict[int, List[BoundingBox]]:
|
| 259 |
+
batch_size = 16
|
| 260 |
+
detection_results = []
|
| 261 |
+
n_frames = len(decoded_images)
|
| 262 |
+
for frame_number in range(0, n_frames, batch_size):
|
| 263 |
+
batch_images = decoded_images[frame_number: frame_number + batch_size]
|
| 264 |
+
detections = self.bbox_model(batch_images, verbose=False, save=False)
|
| 265 |
+
detection_results.extend(detections)
|
| 266 |
+
|
| 267 |
+
return detection_results
|
| 268 |
+
|
| 269 |
+
def _team_classify(self, detection_results, decoded_images, offset):
|
| 270 |
+
self.team_classifier_fitted = False
|
| 271 |
+
start = time.time()
|
| 272 |
+
# Collect player crops from first batch for fitting
|
| 273 |
+
fit_sample_size = 600
|
| 274 |
+
player_crops_for_fit = []
|
| 275 |
+
|
| 276 |
+
for frame_id in range(len(detection_results)):
|
| 277 |
+
detection_box = detection_results[frame_id].boxes.data
|
| 278 |
+
if len(detection_box) < 4:
|
| 279 |
+
continue
|
| 280 |
+
# Collect player boxes for team classification fitting (first batch only)
|
| 281 |
+
if len(player_crops_for_fit) < fit_sample_size:
|
| 282 |
+
frame_image = decoded_images[frame_id]
|
| 283 |
+
for box in detection_box:
|
| 284 |
+
x1, y1, x2, y2, conf, cls_id = box.tolist()
|
| 285 |
+
if conf < 0.5:
|
| 286 |
+
continue
|
| 287 |
+
mapped_cls_id = str(int(cls_id))
|
| 288 |
+
# Only collect player crops (cls_id = 2)
|
| 289 |
+
if mapped_cls_id == '2':
|
| 290 |
+
crop = frame_image[int(y1):int(y2), int(x1):int(x2)]
|
| 291 |
+
if crop.size > 0:
|
| 292 |
+
player_crops_for_fit.append(crop)
|
| 293 |
+
|
| 294 |
+
# Fit team classifier after collecting samples
|
| 295 |
+
if self.team_classifier and not self.team_classifier_fitted and len(player_crops_for_fit) >= fit_sample_size:
|
| 296 |
+
print(f"Fitting TeamClassifier with {len(player_crops_for_fit)} player crops")
|
| 297 |
+
self.team_classifier.fit(player_crops_for_fit)
|
| 298 |
+
self.team_classifier_fitted = True
|
| 299 |
+
break
|
| 300 |
+
if not self.team_classifier_fitted and len(player_crops_for_fit) >= 16:
|
| 301 |
+
print(f"Fallback: Fitting TeamClassifier with {len(player_crops_for_fit)} player crops")
|
| 302 |
+
self.team_classifier.fit(player_crops_for_fit)
|
| 303 |
+
self.team_classifier_fitted = True
|
| 304 |
+
end = time.time()
|
| 305 |
+
print(f"Fitting Kmeans time: {end - start}")
|
| 306 |
+
|
| 307 |
+
# Second pass: predict teams with configurable frame skipping optimization
|
| 308 |
+
start = time.time()
|
| 309 |
+
|
| 310 |
+
# Get configuration for frame skipping
|
| 311 |
+
prediction_interval = 1 # Predict every frame; set > 1 to skip frames and interpolate teams via IoU matching
|
| 312 |
+
iou_threshold = 0.3
|
| 313 |
+
|
| 314 |
+
print(f"Team classification - prediction_interval: {prediction_interval}, iou_threshold: {iou_threshold}")
|
| 315 |
+
|
| 316 |
+
# Storage for predicted frame results: {frame_id: {box_idx: (bbox, team_id)}}
|
| 317 |
+
predicted_frame_data = {}
|
| 318 |
+
|
| 319 |
+
# Step 1: Predict for frames at prediction_interval only
|
| 320 |
+
frames_to_predict = []
|
| 321 |
+
for frame_id in range(len(detection_results)):
|
| 322 |
+
if frame_id % prediction_interval == 0:
|
| 323 |
+
frames_to_predict.append(frame_id)
|
| 324 |
+
|
| 325 |
+
print(f"Predicting teams for {len(frames_to_predict)}/{len(detection_results)} frames "
|
| 326 |
+
f"(saving {100 - (len(frames_to_predict) * 100 // len(detection_results))}% compute)")
|
| 327 |
+
|
| 328 |
+
for frame_id in frames_to_predict:
|
| 329 |
+
detection_box = detection_results[frame_id].boxes.data
|
| 330 |
+
frame_image = decoded_images[frame_id]
|
| 331 |
+
|
| 332 |
+
# Collect player crops for this frame
|
| 333 |
+
frame_player_crops = []
|
| 334 |
+
frame_player_indices = []
|
| 335 |
+
frame_player_boxes = []
|
| 336 |
+
|
| 337 |
+
for idx, box in enumerate(detection_box):
|
| 338 |
+
x1, y1, x2, y2, conf, cls_id = box.tolist()
|
| 339 |
+
if cls_id == 2 and conf < 0.6:
|
| 340 |
+
continue
|
| 341 |
+
mapped_cls_id = str(int(cls_id))
|
| 342 |
+
|
| 343 |
+
# Collect player crops for prediction
|
| 344 |
+
if self.team_classifier and self.team_classifier_fitted and mapped_cls_id == '2':
|
| 345 |
+
crop = frame_image[int(y1):int(y2), int(x1):int(x2)]
|
| 346 |
+
if crop.size > 0:
|
| 347 |
+
frame_player_crops.append(crop)
|
| 348 |
+
frame_player_indices.append(idx)
|
| 349 |
+
frame_player_boxes.append((x1, y1, x2, y2))
|
| 350 |
+
|
| 351 |
+
# Predict teams for all players in this frame
|
| 352 |
+
if len(frame_player_crops) > 0:
|
| 353 |
+
team_ids = self.team_classifier.predict(frame_player_crops)
|
| 354 |
+
predicted_frame_data[frame_id] = {}
|
| 355 |
+
for idx, bbox, team_id in zip(frame_player_indices, frame_player_boxes, team_ids):
|
| 356 |
+
# Map team_id (0,1) to cls_id (6,7)
|
| 357 |
+
team_cls_id = str(6 + int(team_id))
|
| 358 |
+
predicted_frame_data[frame_id][idx] = (bbox, team_cls_id)
|
| 359 |
+
|
| 360 |
+
# Step 2: Process all frames (interpolate skipped frames)
|
| 361 |
+
fallback_count = 0
|
| 362 |
+
interpolated_count = 0
|
| 363 |
+
bboxes: dict[int, list[BoundingBox]] = {}
|
| 364 |
+
for frame_id in range(len(detection_results)):
|
| 365 |
+
detection_box = detection_results[frame_id].boxes.data
|
| 366 |
+
frame_image = decoded_images[frame_id]
|
| 367 |
+
boxes = []
|
| 368 |
+
|
| 369 |
+
team_predictions = {}
|
| 370 |
+
|
| 371 |
+
if frame_id % prediction_interval == 0:
|
| 372 |
+
# Predicted frame: use pre-computed predictions
|
| 373 |
+
if frame_id in predicted_frame_data:
|
| 374 |
+
for idx, (bbox, team_cls_id) in predicted_frame_data[frame_id].items():
|
| 375 |
+
team_predictions[idx] = team_cls_id
|
| 376 |
+
else:
|
| 377 |
+
# Skipped frame: interpolate from neighboring predicted frames
|
| 378 |
+
# Find nearest predicted frames
|
| 379 |
+
prev_predicted_frame = (frame_id // prediction_interval) * prediction_interval
|
| 380 |
+
next_predicted_frame = prev_predicted_frame + prediction_interval
|
| 381 |
+
|
| 382 |
+
# Collect current frame player boxes
|
| 383 |
+
for idx, box in enumerate(detection_box):
|
| 384 |
+
x1, y1, x2, y2, conf, cls_id = box.tolist()
|
| 385 |
+
if cls_id == 2 and conf < 0.6:
|
| 386 |
+
continue
|
| 387 |
+
mapped_cls_id = str(int(cls_id))
|
| 388 |
+
|
| 389 |
+
if self.team_classifier and self.team_classifier_fitted and mapped_cls_id == '2':
|
| 390 |
+
target_box = (x1, y1, x2, y2)
|
| 391 |
+
|
| 392 |
+
# Try to match with previous predicted frame
|
| 393 |
+
best_team_id = None
|
| 394 |
+
best_iou = 0.0
|
| 395 |
+
|
| 396 |
+
if prev_predicted_frame in predicted_frame_data:
|
| 397 |
+
team_id, iou = self._find_best_match(
|
| 398 |
+
target_box,
|
| 399 |
+
predicted_frame_data[prev_predicted_frame],
|
| 400 |
+
iou_threshold
|
| 401 |
+
)
|
| 402 |
+
if team_id is not None:
|
| 403 |
+
best_team_id = team_id
|
| 404 |
+
best_iou = iou
|
| 405 |
+
|
| 406 |
+
# Try to match with next predicted frame if available and no good match yet
|
| 407 |
+
if best_team_id is None and next_predicted_frame < len(detection_results):
|
| 408 |
+
if next_predicted_frame in predicted_frame_data:
|
| 409 |
+
team_id, iou = self._find_best_match(
|
| 410 |
+
target_box,
|
| 411 |
+
predicted_frame_data[next_predicted_frame],
|
| 412 |
+
iou_threshold
|
| 413 |
+
)
|
| 414 |
+
if team_id is not None and iou > best_iou:
|
| 415 |
+
best_team_id = team_id
|
| 416 |
+
best_iou = iou
|
| 417 |
+
|
| 418 |
+
# Track interpolation success
|
| 419 |
+
if best_team_id is not None:
|
| 420 |
+
interpolated_count += 1
|
| 421 |
+
else:
|
| 422 |
+
# Fallback: if no match found, predict individually
|
| 423 |
+
crop = frame_image[int(y1):int(y2), int(x1):int(x2)]
|
| 424 |
+
if crop.size > 0:
|
| 425 |
+
team_id = self.team_classifier.predict([crop])[0]
|
| 426 |
+
best_team_id = str(6 + int(team_id))
|
| 427 |
+
fallback_count += 1
|
| 428 |
+
|
| 429 |
+
if best_team_id is not None:
|
| 430 |
+
team_predictions[idx] = best_team_id
|
| 431 |
+
|
| 432 |
+
# Parse boxes with team classification
|
| 433 |
+
for idx, box in enumerate(detection_box):
|
| 434 |
+
x1, y1, x2, y2, conf, cls_id = box.tolist()
|
| 435 |
+
if cls_id == 2 and conf < 0.6:
|
| 436 |
+
continue
|
| 437 |
+
|
| 438 |
+
# Check overlap with staff box
|
| 439 |
+
overlap_staff = False
|
| 440 |
+
for idy, boxy in enumerate(detection_box):
|
| 441 |
+
s_x1, s_y1, s_x2, s_y2, s_conf, s_cls_id = boxy.tolist()
|
| 442 |
+
if cls_id == 2 and s_cls_id == 4:
|
| 443 |
+
staff_iou = self._calculate_iou(box[:4], boxy[:4])
|
| 444 |
+
if staff_iou >= 0.8:
|
| 445 |
+
overlap_staff = True
|
| 446 |
+
break
|
| 447 |
+
if overlap_staff:
|
| 448 |
+
continue
|
| 449 |
+
|
| 450 |
+
mapped_cls_id = str(int(cls_id))
|
| 451 |
+
|
| 452 |
+
# Override cls_id for players with team prediction
|
| 453 |
+
if idx in team_predictions:
|
| 454 |
+
mapped_cls_id = team_predictions[idx]
|
| 455 |
+
if mapped_cls_id != '4':
|
| 456 |
+
if int(mapped_cls_id) == 3 and conf < 0.5:
|
| 457 |
+
continue
|
| 458 |
+
boxes.append(
|
| 459 |
+
BoundingBox(
|
| 460 |
+
x1=int(x1),
|
| 461 |
+
y1=int(y1),
|
| 462 |
+
x2=int(x2),
|
| 463 |
+
y2=int(y2),
|
| 464 |
+
cls_id=int(mapped_cls_id),
|
| 465 |
+
conf=float(conf),
|
| 466 |
+
)
|
| 467 |
+
)
|
| 468 |
+
# Handle footballs - keep only the best one
|
| 469 |
+
footballs = [bb for bb in boxes if int(bb.cls_id) == 0]
|
| 470 |
+
if len(footballs) > 1:
|
| 471 |
+
best_ball = max(footballs, key=lambda b: b.conf)
|
| 472 |
+
boxes = [bb for bb in boxes if int(bb.cls_id) != 0]
|
| 473 |
+
boxes.append(best_ball)
|
| 474 |
+
|
| 475 |
+
bboxes[offset + frame_id] = boxes
|
| 476 |
+
return bboxes
|
| 477 |
+
|
| 478 |
+
|
| 479 |
+
def predict_batch(self, batch_images: List[ndarray], offset: int, n_keypoints: int) -> List[TVFrameResult]:
|
| 480 |
+
print('=' * 10)
|
| 481 |
+
print(f"Offset: {offset}, Batch size: {len(batch_images)}")
|
| 482 |
+
print('=' * 10)
|
| 483 |
+
|
| 484 |
+
start = time.time()
|
| 485 |
+
detection_results = self._detect_objects_batch(batch_images)
|
| 486 |
+
end = time.time()
|
| 487 |
+
print(f"Detection time: {end - start}")
|
| 488 |
+
|
| 489 |
+
# Use hybrid team classification
|
| 490 |
+
start = time.time()
|
| 491 |
+
bboxes = self._team_classify(detection_results, batch_images, offset)
|
| 492 |
+
end = time.time()
|
| 493 |
+
print(f"Team classify time: {end - start}")
|
| 494 |
+
|
| 495 |
+
# Phase 3: Keypoint Detection
|
| 496 |
+
keypoints_yolo: Dict[int, List[Tuple[int, int]]] = {}
|
| 497 |
+
|
| 498 |
+
keypoints_yolo = self._detect_keypoints_batch(batch_images, offset, n_keypoints)
|
| 499 |
+
|
| 500 |
+
|
| 501 |
+
# pitch_batch_size = min(self.pitch_batch_size, len(batch_images))
|
| 502 |
+
# keypoints: Dict[int, List[Tuple[int, int]]] = {}
|
| 503 |
+
|
| 504 |
+
# start = time.time()
|
| 505 |
+
# while True:
|
| 506 |
+
# gc.collect()
|
| 507 |
+
# if torch.cuda.is_available():
|
| 508 |
+
# torch.cuda.empty_cache()
|
| 509 |
+
# torch.cuda.synchronize()
|
| 510 |
+
# device_str = "cuda"
|
| 511 |
+
# keypoints_result = process_batch_input(
|
| 512 |
+
# batch_images,
|
| 513 |
+
# self.keypoints_model,
|
| 514 |
+
# self.kp_threshold,
|
| 515 |
+
# device_str,
|
| 516 |
+
# batch_size=pitch_batch_size,
|
| 517 |
+
# )
|
| 518 |
+
# if keypoints_result is not None and len(keypoints_result) > 0:
|
| 519 |
+
# for frame_number_in_batch, kp_dict in enumerate(keypoints_result):
|
| 520 |
+
# if frame_number_in_batch >= len(batch_images):
|
| 521 |
+
# break
|
| 522 |
+
# frame_keypoints: List[Tuple[int, int]] = []
|
| 523 |
+
# try:
|
| 524 |
+
# height, width = batch_images[frame_number_in_batch].shape[:2]
|
| 525 |
+
# if kp_dict is not None and isinstance(kp_dict, dict):
|
| 526 |
+
# for idx in range(32):
|
| 527 |
+
# x, y = 0, 0
|
| 528 |
+
# kp_idx = idx + 1
|
| 529 |
+
# if kp_idx in kp_dict:
|
| 530 |
+
# try:
|
| 531 |
+
# kp_data = kp_dict[kp_idx]
|
| 532 |
+
# if isinstance(kp_data, dict) and "x" in kp_data and "y" in kp_data:
|
| 533 |
+
# x = int(kp_data["x"] * width)
|
| 534 |
+
# y = int(kp_data["y"] * height)
|
| 535 |
+
# except (KeyError, TypeError, ValueError):
|
| 536 |
+
# pass
|
| 537 |
+
# frame_keypoints.append((x, y))
|
| 538 |
+
# except (IndexError, ValueError, AttributeError):
|
| 539 |
+
# frame_keypoints = [(0, 0)] * 32
|
| 540 |
+
# if len(frame_keypoints) < n_keypoints:
|
| 541 |
+
# frame_keypoints.extend([(0, 0)] * (n_keypoints - len(frame_keypoints)))
|
| 542 |
+
# else:
|
| 543 |
+
# frame_keypoints = frame_keypoints[:n_keypoints]
|
| 544 |
+
|
| 545 |
+
# # time1 = time.time()
|
| 546 |
+
# # frame_keypoints_yolo = keypoints_yolo.get(offset + frame_number_in_batch, frame_keypoints)
|
| 547 |
+
|
| 548 |
+
# # valid_keypoints_count = 0
|
| 549 |
+
# # valid_keypoints_yolo_count = 0
|
| 550 |
+
# # for kp in frame_keypoints:
|
| 551 |
+
# # if kp[0] != 0.0 or kp[1] != 0.0:
|
| 552 |
+
# # valid_keypoints_count += 1
|
| 553 |
+
# # if valid_keypoints_count > 3:
|
| 554 |
+
# # break
|
| 555 |
+
|
| 556 |
+
# # for kp in frame_keypoints_yolo:
|
| 557 |
+
# # if kp[0] != 0.0 or kp[1] != 0.0:
|
| 558 |
+
# # valid_keypoints_yolo_count += 1
|
| 559 |
+
# # if valid_keypoints_yolo_count > 3:
|
| 560 |
+
# # break
|
| 561 |
+
|
| 562 |
+
# # # Evaluate and select best keypoints (using batch evaluation for speed)
|
| 563 |
+
# # if valid_keypoints_count > 3 and valid_keypoints_yolo_count > 3:
|
| 564 |
+
# # try:
|
| 565 |
+
# # # Evaluate both keypoint sets in batch (much faster!)
|
| 566 |
+
# # scores = evaluate_keypoints_batch_for_frame(
|
| 567 |
+
# # template_keypoints=self.template_keypoints,
|
| 568 |
+
# # frame_keypoints_list=[frame_keypoints, frame_keypoints_yolo],
|
| 569 |
+
# # frame=batch_images[frame_number_in_batch],
|
| 570 |
+
# # floor_markings_template=self.template_image,
|
| 571 |
+
# # device="cuda"
|
| 572 |
+
# # )
|
| 573 |
+
# # score = scores[0]
|
| 574 |
+
# # score_yolo = scores[1]
|
| 575 |
+
|
| 576 |
+
# # # Select the one with higher score
|
| 577 |
+
# # if score_yolo > score:
|
| 578 |
+
# # frame_keypoints = frame_keypoints_yolo
|
| 579 |
+
# # except Exception as e:
|
| 580 |
+
# # # Fallback: use YOLO if available, otherwise use pitch model
|
| 581 |
+
# # if valid_keypoints_yolo_count > 3:
|
| 582 |
+
# # frame_keypoints = frame_keypoints_yolo
|
| 583 |
+
# # elif valid_keypoints_yolo_count > 3:
|
| 584 |
+
# # # Only YOLO has valid keypoints
|
| 585 |
+
# # frame_keypoints = frame_keypoints_yolo
|
| 586 |
+
# # time2 = time.time()
|
| 587 |
+
# # print(f"Keypoint evaluation time: {time2 - time1}")
|
| 588 |
+
|
| 589 |
+
# keypoints[offset + frame_number_in_batch] = frame_keypoints
|
| 590 |
+
# break
|
| 591 |
+
# end = time.time()
|
| 592 |
+
# print(f"Keypoint time: {end - start}")
|
| 593 |
+
|
| 594 |
+
results: List[TVFrameResult] = []
|
| 595 |
+
for frame_number in range(offset, offset + len(batch_images)):
|
| 596 |
+
frame_boxes = bboxes.get(frame_number, [])
|
| 597 |
+
result = TVFrameResult(
|
| 598 |
+
frame_id=frame_number,
|
| 599 |
+
boxes=frame_boxes,
|
| 600 |
+
keypoints=keypoints_yolo.get(
|
| 601 |
+
frame_number,
|
| 602 |
+
[(0, 0) for _ in range(n_keypoints)],
|
| 603 |
+
),
|
| 604 |
+
)
|
| 605 |
+
results.append(result)
|
| 606 |
+
|
| 607 |
+
start = time.time()
|
| 608 |
+
if len(batch_images) > 0:
|
| 609 |
+
h, w = batch_images[0].shape[:2]
|
| 610 |
+
results = run_keypoints_post_processing_v2(
|
| 611 |
+
results, w, h,
|
| 612 |
+
frames=batch_images,
|
| 613 |
+
template_keypoints=self.template_keypoints,
|
| 614 |
+
floor_markings_template=self.template_image,
|
| 615 |
+
offset=offset
|
| 616 |
+
)
|
| 617 |
+
end = time.time()
|
| 618 |
+
print(f"Keypoint post processing time: {end - start}")
|
| 619 |
+
|
| 620 |
+
gc.collect()
|
| 621 |
+
if torch.cuda.is_available():
|
| 622 |
+
torch.cuda.empty_cache()
|
| 623 |
+
torch.cuda.synchronize()
|
| 624 |
+
|
| 625 |
+
return results
|
| 626 |
+
|
| 627 |
+
def _detect_keypoints_batch(self, batch_images: List[ndarray],
|
| 628 |
+
offset: int, n_keypoints: int) -> Dict[int, List[Tuple[int, int]]]:
|
| 629 |
+
"""
|
| 630 |
+
Phase 3: Keypoint detection for all frames in batch.
|
| 631 |
+
|
| 632 |
+
Args:
|
| 633 |
+
batch_images: List of images to process
|
| 634 |
+
offset: Frame offset for numbering
|
| 635 |
+
n_keypoints: Number of keypoints expected
|
| 636 |
+
|
| 637 |
+
Returns:
|
| 638 |
+
Dictionary mapping frame_id to list of keypoint coordinates
|
| 639 |
+
"""
|
| 640 |
+
keypoints: Dict[int, List[Tuple[int, int]]] = {}
|
| 641 |
+
keypoints_model_results = self.keypoints_model_yolo.predict(batch_images)
|
| 642 |
+
|
| 643 |
+
if keypoints_model_results is None:
|
| 644 |
+
return keypoints
|
| 645 |
+
|
| 646 |
+
for frame_idx_in_batch, detection in enumerate(keypoints_model_results):
|
| 647 |
+
if not hasattr(detection, "keypoints") or detection.keypoints is None:
|
| 648 |
+
continue
|
| 649 |
+
|
| 650 |
+
# Extract keypoints with confidence
|
| 651 |
+
frame_keypoints_with_conf: List[Tuple[int, int, float]] = []
|
| 652 |
+
for i, part_points in enumerate(detection.keypoints.data):
|
| 653 |
+
for k_id, (x, y, _) in enumerate(part_points):
|
| 654 |
+
confidence = float(detection.keypoints.conf[i][k_id])
|
| 655 |
+
frame_keypoints_with_conf.append((int(x), int(y), confidence))
|
| 656 |
+
|
| 657 |
+
# Pad or truncate to expected number of keypoints
|
| 658 |
+
if len(frame_keypoints_with_conf) < n_keypoints:
|
| 659 |
+
frame_keypoints_with_conf.extend(
|
| 660 |
+
[(0, 0, 0.0)] * (n_keypoints - len(frame_keypoints_with_conf))
|
| 661 |
+
)
|
| 662 |
+
else:
|
| 663 |
+
frame_keypoints_with_conf = frame_keypoints_with_conf[:n_keypoints]
|
| 664 |
+
|
| 665 |
+
# Filter keypoints based on confidence thresholds
|
| 666 |
+
filtered_keypoints: List[Tuple[int, int]] = []
|
| 667 |
+
for idx, (x, y, confidence) in enumerate(frame_keypoints_with_conf):
|
| 668 |
+
if idx in self.CORNER_INDICES:
|
| 669 |
+
# Corner keypoints have lower confidence threshold
|
| 670 |
+
if confidence < 0.3:
|
| 671 |
+
filtered_keypoints.append((0, 0))
|
| 672 |
+
else:
|
| 673 |
+
filtered_keypoints.append((int(x), int(y)))
|
| 674 |
+
else:
|
| 675 |
+
# Regular keypoints
|
| 676 |
+
if confidence < 0.5:
|
| 677 |
+
filtered_keypoints.append((0, 0))
|
| 678 |
+
else:
|
| 679 |
+
filtered_keypoints.append((int(x), int(y)))
|
| 680 |
+
|
| 681 |
+
frame_id = offset + frame_idx_in_batch
|
| 682 |
+
keypoints[frame_id] = filtered_keypoints
|
| 683 |
+
|
| 684 |
+
return keypoints
|
| 685 |
+
|
miner2.py
ADDED
|
@@ -0,0 +1,953 @@
|
|
| 1 |
+
from pathlib import Path
|
| 2 |
+
from typing import List, Tuple, Dict, Optional
|
| 3 |
+
import sys
|
| 4 |
+
import os
|
| 5 |
+
|
| 6 |
+
from numpy import ndarray
|
| 7 |
+
from pydantic import BaseModel
|
| 8 |
+
|
| 9 |
+
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
|
| 10 |
+
from keypoint_helper import run_keypoints_post_processing
|
| 11 |
+
from keypoint_helper_v2 import run_keypoints_post_processing as run_keypoints_post_processing_v2
|
| 12 |
+
|
| 13 |
+
from ultralytics import YOLO
|
| 14 |
+
from team_cluster import TeamClassifier
|
| 15 |
+
from utils import (
|
| 16 |
+
BoundingBox,
|
| 17 |
+
Constants,
|
| 18 |
+
)
|
| 19 |
+
|
| 20 |
+
import time
|
| 21 |
+
import torch
|
| 22 |
+
import gc
|
| 23 |
+
import cv2
|
| 24 |
+
import numpy as np
|
| 25 |
+
from collections import defaultdict
|
| 26 |
+
from pitch import process_batch_input, get_cls_net
|
| 27 |
+
from keypoint_evaluation import (
|
| 28 |
+
evaluate_keypoints_for_frame,
|
| 29 |
+
evaluate_keypoints_for_frame_gpu,
|
| 30 |
+
load_template_from_file,
|
| 31 |
+
evaluate_keypoints_for_frame_opencv_cuda,
|
| 32 |
+
evaluate_keypoints_batch_for_frame,
|
| 33 |
+
)
|
| 34 |
+
|
| 35 |
+
import yaml
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
class BoundingBox(BaseModel):
|
| 39 |
+
x1: int
|
| 40 |
+
y1: int
|
| 41 |
+
x2: int
|
| 42 |
+
y2: int
|
| 43 |
+
cls_id: int
|
| 44 |
+
conf: float
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
class TVFrameResult(BaseModel):
|
| 48 |
+
frame_id: int
|
| 49 |
+
boxes: List[BoundingBox]
|
| 50 |
+
keypoints: List[Tuple[int, int]]
|
| 51 |
+
|
| 52 |
+
|
| 53 |
+
class Miner:
|
| 54 |
+
SMALL_CONTAINED_IOA = Constants.SMALL_CONTAINED_IOA
|
| 55 |
+
SMALL_RATIO_MAX = Constants.SMALL_RATIO_MAX
|
| 56 |
+
SINGLE_PLAYER_HUE_PIVOT = Constants.SINGLE_PLAYER_HUE_PIVOT
|
| 57 |
+
CORNER_INDICES = Constants.CORNER_INDICES
|
| 58 |
+
KEYPOINTS_CONFIDENCE = Constants.KEYPOINTS_CONFIDENCE
|
| 59 |
+
CORNER_CONFIDENCE = Constants.CORNER_CONFIDENCE
|
| 60 |
+
GOALKEEPER_POSITION_MARGIN = Constants.GOALKEEPER_POSITION_MARGIN
|
| 61 |
+
MIN_SAMPLES_FOR_FIT = 16 # Minimum player crops needed before fitting TeamClassifier
|
| 62 |
+
MAX_SAMPLES_FOR_FIT = 600 # Maximum samples to avoid overfitting
|
| 63 |
+
|
| 64 |
+
def __init__(self, path_hf_repo: Path) -> None:
|
| 65 |
+
try:
|
| 66 |
+
device = "cuda" if torch.cuda.is_available() else "cpu"
|
| 67 |
+
model_path = path_hf_repo / "detection.onnx"
|
| 68 |
+
self.bbox_model = YOLO(model_path)
|
| 69 |
+
|
| 70 |
+
print(f"BBox Model Loaded: class name {self.bbox_model.names}")
|
| 71 |
+
|
| 72 |
+
team_model_path = path_hf_repo / "osnet_model.pth.tar-100"
|
| 73 |
+
self.team_classifier = TeamClassifier(
|
| 74 |
+
device=device,
|
| 75 |
+
batch_size=32,
|
| 76 |
+
model_name=str(team_model_path)
|
| 77 |
+
)
|
| 78 |
+
print("Team Classifier Loaded")
|
| 79 |
+
|
| 80 |
+
self.last_score = 0
|
| 81 |
+
self.last_valid_keypoints = None
|
| 82 |
+
# Team classification state
|
| 83 |
+
self.team_classifier_fitted = False
|
| 84 |
+
self.player_crops_for_fit = []
|
| 85 |
+
|
| 86 |
+
self.keypoints_model_yolo = YOLO(path_hf_repo / "keypoint.pt")
|
| 87 |
+
|
| 88 |
+
model_kp_path = path_hf_repo / 'keypoint'
|
| 89 |
+
config_kp_path = path_hf_repo / 'hrnetv2_w48.yaml'
|
| 90 |
+
cfg_kp = yaml.safe_load(open(config_kp_path, 'r'))
|
| 91 |
+
|
| 92 |
+
loaded_state_kp = torch.load(model_kp_path, map_location=device)
|
| 93 |
+
model = get_cls_net(cfg_kp)
|
| 94 |
+
model.load_state_dict(loaded_state_kp)
|
| 95 |
+
model.to(device)
|
| 96 |
+
model.eval()
|
| 97 |
+
|
| 98 |
+
self.keypoints_model = model
|
| 99 |
+
print("Keypoints Model (keypoint.pt) Loaded")
|
| 100 |
+
|
| 101 |
+
template_image_path = path_hf_repo / "football_pitch_template.png"
|
| 102 |
+
self.template_image, self.template_keypoints = load_template_from_file(str(template_image_path))
|
| 103 |
+
|
| 104 |
+
self.kp_threshold = 0.1
|
| 105 |
+
self.pitch_batch_size = 4
|
| 106 |
+
self.health = "healthy"
|
| 107 |
+
|
| 108 |
+
print("✅ Keypoints Model Loaded")
|
| 109 |
+
except Exception as e:
|
| 110 |
+
self.health = "❌ Miner initialization failed: " + str(e)
|
| 111 |
+
print(self.health)
|
| 112 |
+
|
| 113 |
+
def __repr__(self) -> str:
|
| 114 |
+
if self.health == 'healthy':
|
| 115 |
+
return (
|
| 116 |
+
f"health: {self.health}\n"
|
| 117 |
+
f"BBox Model: {type(self.bbox_model).__name__}\n"
|
| 118 |
+
f"Keypoints Model: {type(self.keypoints_model).__name__}"
|
| 119 |
+
)
|
| 120 |
+
else:
|
| 121 |
+
return self.health
|
| 122 |
+
|
| 123 |
+
def _calculate_iou(self, box1: Tuple[float, float, float, float],
|
| 124 |
+
box2: Tuple[float, float, float, float]) -> float:
|
| 125 |
+
"""
|
| 126 |
+
Calculate Intersection over Union (IoU) between two bounding boxes.
|
| 127 |
+
Args:
|
| 128 |
+
box1: (x1, y1, x2, y2)
|
| 129 |
+
box2: (x1, y1, x2, y2)
|
| 130 |
+
Returns:
|
| 131 |
+
IoU score (0-1)
|
| 132 |
+
"""
|
| 133 |
+
x1_1, y1_1, x2_1, y2_1 = box1
|
| 134 |
+
x1_2, y1_2, x2_2, y2_2 = box2
|
| 135 |
+
|
| 136 |
+
# Calculate intersection area
|
| 137 |
+
x_left = max(x1_1, x1_2)
|
| 138 |
+
y_top = max(y1_1, y1_2)
|
| 139 |
+
x_right = min(x2_1, x2_2)
|
| 140 |
+
y_bottom = min(y2_1, y2_2)
|
| 141 |
+
|
| 142 |
+
if x_right < x_left or y_bottom < y_top:
|
| 143 |
+
return 0.0
|
| 144 |
+
|
| 145 |
+
intersection_area = (x_right - x_left) * (y_bottom - y_top)
|
| 146 |
+
|
| 147 |
+
# Calculate union area
|
| 148 |
+
box1_area = (x2_1 - x1_1) * (y2_1 - y1_1)
|
| 149 |
+
box2_area = (x2_2 - x1_2) * (y2_2 - y1_2)
|
| 150 |
+
union_area = box1_area + box2_area - intersection_area
|
| 151 |
+
|
| 152 |
+
if union_area == 0:
|
| 153 |
+
return 0.0
|
| 154 |
+
|
| 155 |
+
return intersection_area / union_area
|
| 156 |
+
|
| 157 |
+
def _extract_jersey_region(self, crop: ndarray) -> ndarray:
|
| 158 |
+
"""
|
| 159 |
+
Extract jersey region (upper body) from player crop.
|
| 160 |
+
For close-ups, focuses on upper 60%, for distant shots uses full crop.
|
| 161 |
+
"""
|
| 162 |
+
if crop is None or crop.size == 0:
|
| 163 |
+
return crop
|
| 164 |
+
|
| 165 |
+
h, w = crop.shape[:2]
|
| 166 |
+
if h < 10 or w < 10:
|
| 167 |
+
return crop
|
| 168 |
+
|
| 169 |
+
# For close-up shots, extract upper body (jersey region)
|
| 170 |
+
is_closeup = h > 100 or (h * w) > 12000
|
| 171 |
+
if is_closeup:
|
| 172 |
+
# Upper 60% of the crop (jersey area, avoiding shorts)
|
| 173 |
+
jersey_top = 0
|
| 174 |
+
jersey_bottom = int(h * 0.60)
|
| 175 |
+
jersey_left = max(0, int(w * 0.05))
|
| 176 |
+
jersey_right = min(w, int(w * 0.95))
|
| 177 |
+
return crop[jersey_top:jersey_bottom, jersey_left:jersey_right]
|
| 178 |
+
return crop
|
| 179 |
+
|
| 180 |
+
def _extract_color_signature(self, crop: ndarray) -> Optional[np.ndarray]:
|
| 181 |
+
"""
|
| 182 |
+
Extract color signature from jersey region using HSV and LAB color spaces.
|
| 183 |
+
Returns a feature vector with dominant colors and color statistics.
|
| 184 |
+
"""
|
| 185 |
+
if crop is None or crop.size == 0:
|
| 186 |
+
return None
|
| 187 |
+
|
| 188 |
+
jersey_region = self._extract_jersey_region(crop)
|
| 189 |
+
if jersey_region.size == 0:
|
| 190 |
+
return None
|
| 191 |
+
|
| 192 |
+
try:
|
| 193 |
+
# Convert to HSV and LAB color spaces
|
| 194 |
+
hsv = cv2.cvtColor(jersey_region, cv2.COLOR_BGR2HSV)
|
| 195 |
+
lab = cv2.cvtColor(jersey_region, cv2.COLOR_BGR2LAB)
|
| 196 |
+
|
| 197 |
+
# Reshape for processing
|
| 198 |
+
hsv_flat = hsv.reshape(-1, 3).astype(np.float32)
|
| 199 |
+
lab_flat = lab.reshape(-1, 3).astype(np.float32)
|
| 200 |
+
|
| 201 |
+
# Compute statistics for HSV
|
| 202 |
+
hsv_mean = np.mean(hsv_flat, axis=0) / 255.0
|
| 203 |
+
hsv_std = np.std(hsv_flat, axis=0) / 255.0
|
| 204 |
+
|
| 205 |
+
# Compute statistics for LAB
|
| 206 |
+
lab_mean = np.mean(lab_flat, axis=0) / 255.0
|
| 207 |
+
lab_std = np.std(lab_flat, axis=0) / 255.0
|
| 208 |
+
|
| 209 |
+
# Dominant color (most frequent hue)
|
| 210 |
+
hue_hist, _ = np.histogram(hsv_flat[:, 0], bins=36, range=(0, 180))
|
| 211 |
+
dominant_hue = np.argmax(hue_hist) * 5 # Convert to hue value
|
| 212 |
+
|
| 213 |
+
# Combine features
|
| 214 |
+
color_features = np.concatenate([
|
| 215 |
+
hsv_mean,
|
| 216 |
+
hsv_std,
|
| 217 |
+
lab_mean[:2], # L and A channels (B is less informative)
|
| 218 |
+
lab_std[:2],
|
| 219 |
+
[dominant_hue / 180.0] # Normalized dominant hue
|
| 220 |
+
])
|
| 221 |
+
|
| 222 |
+
return color_features
|
| 223 |
+
except Exception as e:
|
| 224 |
+
print(f"Error extracting color signature: {e}")
|
| 225 |
+
return None
|
| 226 |
+
|
| 227 |
+
def _get_spatial_position(self, bbox: Tuple[float, float, float, float],
|
| 228 |
+
frame_width: int, frame_height: int) -> Tuple[float, float]:
|
| 229 |
+
"""
|
| 230 |
+
Get normalized spatial position of player on the pitch.
|
| 231 |
+
Returns (x_normalized, y_normalized) where 0,0 is top-left.
|
| 232 |
+
"""
|
| 233 |
+
x1, y1, x2, y2 = bbox
|
| 234 |
+
center_x = (x1 + x2) / 2.0
|
| 235 |
+
center_y = (y1 + y2) / 2.0
|
| 236 |
+
|
| 237 |
+
# Normalize to [0, 1]
|
| 238 |
+
x_norm = center_x / frame_width if frame_width > 0 else 0.5
|
| 239 |
+
y_norm = center_y / frame_height if frame_height > 0 else 0.5
|
| 240 |
+
|
| 241 |
+
return (x_norm, y_norm)
|
| 242 |
+
|
| 243 |
+
def _find_best_match(self, target_box: Tuple[float, float, float, float],
|
| 244 |
+
predicted_frame_data: Dict[int, Tuple[Tuple, str]],
|
| 245 |
+
iou_threshold: float) -> Tuple[Optional[str], float]:
|
| 246 |
+
"""
|
| 247 |
+
Find best matching box in predicted frame data using IoU.
|
| 248 |
+
"""
|
| 249 |
+
best_iou = 0.0
|
| 250 |
+
best_team_id = None
|
| 251 |
+
|
| 252 |
+
for idx, (bbox, team_cls_id) in predicted_frame_data.items():
|
| 253 |
+
iou = self._calculate_iou(target_box, bbox)
|
| 254 |
+
if iou > best_iou and iou >= iou_threshold:
|
| 255 |
+
best_iou = iou
|
| 256 |
+
best_team_id = team_cls_id
|
| 257 |
+
|
| 258 |
+
return (best_team_id, best_iou)
|
| 259 |
+
|
| 260 |
+
def _detect_objects_batch(self, decoded_images: List[ndarray]) -> List:  # returns raw ultralytics Results, one per frame
|
| 261 |
+
batch_size = 16
|
| 262 |
+
detection_results = []
|
| 263 |
+
n_frames = len(decoded_images)
|
| 264 |
+
for frame_number in range(0, n_frames, batch_size):
|
| 265 |
+
batch_images = decoded_images[frame_number: frame_number + batch_size]
|
| 266 |
+
detections = self.bbox_model(batch_images, verbose=False, save=False)
|
| 267 |
+
detection_results.extend(detections)
|
| 268 |
+
|
| 269 |
+
return detection_results
|
| 270 |
+
|
| 271 |
+
def _team_classify(self, detection_results, decoded_images, offset):
|
| 272 |
+
self.team_classifier_fitted = False
|
| 273 |
+
start = time.time()
|
| 274 |
+
# Collect player crops from first batch for fitting
|
| 275 |
+
fit_sample_size = 600
|
| 276 |
+
player_crops_for_fit = []
|
| 277 |
+
|
| 278 |
+
for frame_id in range(len(detection_results)):
|
| 279 |
+
detection_box = detection_results[frame_id].boxes.data
|
| 280 |
+
if len(detection_box) < 4:
|
| 281 |
+
continue
|
| 282 |
+
# Collect player boxes for team classification fitting (first batch only)
|
| 283 |
+
if len(player_crops_for_fit) < fit_sample_size:
|
| 284 |
+
frame_image = decoded_images[frame_id]
|
| 285 |
+
for box in detection_box:
|
| 286 |
+
x1, y1, x2, y2, conf, cls_id = box.tolist()
|
| 287 |
+
if conf < 0.5:
|
| 288 |
+
continue
|
| 289 |
+
mapped_cls_id = str(int(cls_id))
|
| 290 |
+
# Only collect player crops (cls_id = 2)
|
| 291 |
+
if mapped_cls_id == '2':
|
| 292 |
+
crop = frame_image[int(y1):int(y2), int(x1):int(x2)]
|
| 293 |
+
if crop.size > 0:
|
| 294 |
+
player_crops_for_fit.append(crop)
|
| 295 |
+
|
| 296 |
+
# Fit team classifier after collecting samples
|
| 297 |
+
if self.team_classifier and not self.team_classifier_fitted and len(player_crops_for_fit) >= fit_sample_size:
|
| 298 |
+
print(f"Fitting TeamClassifier with {len(player_crops_for_fit)} player crops")
|
| 299 |
+
self.team_classifier.fit(player_crops_for_fit)
|
| 300 |
+
self.team_classifier_fitted = True
|
| 301 |
+
break
|
| 302 |
+
if not self.team_classifier_fitted and len(player_crops_for_fit) >= 16:
|
| 303 |
+
print(f"Fallback: Fitting TeamClassifier with {len(player_crops_for_fit)} player crops")
|
| 304 |
+
self.team_classifier.fit(player_crops_for_fit)
|
| 305 |
+
self.team_classifier_fitted = True
|
| 306 |
+
end = time.time()
|
| 307 |
+
print(f"Fitting Kmeans time: {end - start}")
|
| 308 |
+
|
| 309 |
+
# Second pass: predict teams with configurable frame skipping optimization
|
| 310 |
+
start = time.time()
|
| 311 |
+
|
| 312 |
+
# Get configuration for frame skipping
|
| 313 |
+
prediction_interval = 1  # Predict teams on every frame; values > 1 skip frames and interpolate via IoU matching
|
| 314 |
+
iou_threshold = 0.3
|
| 315 |
+
|
| 316 |
+
print(f"Team classification - prediction_interval: {prediction_interval}, iou_threshold: {iou_threshold}")
|
| 317 |
+
|
| 318 |
+
# Storage for predicted frame results: {frame_id: {box_idx: (bbox, team_id)}}
|
| 319 |
+
predicted_frame_data = {}
|
| 320 |
+
|
| 321 |
+
# Step 1: Predict for frames at prediction_interval only
|
| 322 |
+
frames_to_predict = []
|
| 323 |
+
for frame_id in range(len(detection_results)):
|
| 324 |
+
if frame_id % prediction_interval == 0:
|
| 325 |
+
frames_to_predict.append(frame_id)
|
| 326 |
+
|
| 327 |
+
print(f"Predicting teams for {len(frames_to_predict)}/{len(detection_results)} frames "
|
| 328 |
+
f"(saving {100 - (len(frames_to_predict) * 100 // len(detection_results))}% compute)")
|
| 329 |
+
|
| 330 |
+
for frame_id in frames_to_predict:
|
| 331 |
+
detection_box = detection_results[frame_id].boxes.data
|
| 332 |
+
frame_image = decoded_images[frame_id]
|
| 333 |
+
|
| 334 |
+
# Collect player crops for this frame
|
| 335 |
+
frame_player_crops = []
|
| 336 |
+
frame_player_indices = []
|
| 337 |
+
frame_player_boxes = []
|
| 338 |
+
|
| 339 |
+
for idx, box in enumerate(detection_box):
|
| 340 |
+
x1, y1, x2, y2, conf, cls_id = box.tolist()
|
| 341 |
+
if cls_id == 2 and conf < 0.6:
|
| 342 |
+
continue
|
| 343 |
+
mapped_cls_id = str(int(cls_id))
|
| 344 |
+
|
| 345 |
+
# Collect player crops for prediction
|
| 346 |
+
if self.team_classifier and self.team_classifier_fitted and mapped_cls_id == '2':
|
| 347 |
+
crop = frame_image[int(y1):int(y2), int(x1):int(x2)]
|
| 348 |
+
if crop.size > 0:
|
| 349 |
+
frame_player_crops.append(crop)
|
| 350 |
+
frame_player_indices.append(idx)
|
| 351 |
+
frame_player_boxes.append((x1, y1, x2, y2))
|
| 352 |
+
|
| 353 |
+
# Predict teams for all players in this frame
|
| 354 |
+
if len(frame_player_crops) > 0:
|
| 355 |
+
team_ids = self.team_classifier.predict(frame_player_crops)
|
| 356 |
+
predicted_frame_data[frame_id] = {}
|
| 357 |
+
for idx, bbox, team_id in zip(frame_player_indices, frame_player_boxes, team_ids):
|
| 358 |
+
# Map team_id (0,1) to cls_id (6,7)
|
| 359 |
+
team_cls_id = str(6 + int(team_id))
|
| 360 |
+
predicted_frame_data[frame_id][idx] = (bbox, team_cls_id)
|
| 361 |
+
|
| 362 |
+
# Step 2: Process all frames (interpolate skipped frames)
|
| 363 |
+
fallback_count = 0
|
| 364 |
+
interpolated_count = 0
|
| 365 |
+
bboxes: dict[int, list[BoundingBox]] = {}
|
| 366 |
+
for frame_id in range(len(detection_results)):
|
| 367 |
+
detection_box = detection_results[frame_id].boxes.data
|
| 368 |
+
frame_image = decoded_images[frame_id]
|
| 369 |
+
boxes = []
|
| 370 |
+
|
| 371 |
+
team_predictions = {}
|
| 372 |
+
|
| 373 |
+
if frame_id % prediction_interval == 0:
|
| 374 |
+
# Predicted frame: use pre-computed predictions
|
| 375 |
+
if frame_id in predicted_frame_data:
|
| 376 |
+
for idx, (bbox, team_cls_id) in predicted_frame_data[frame_id].items():
|
| 377 |
+
team_predictions[idx] = team_cls_id
|
| 378 |
+
else:
|
| 379 |
+
# Skipped frame: interpolate from neighboring predicted frames
|
| 380 |
+
# Find nearest predicted frames
|
| 381 |
+
prev_predicted_frame = (frame_id // prediction_interval) * prediction_interval
|
| 382 |
+
next_predicted_frame = prev_predicted_frame + prediction_interval
|
| 383 |
+
|
| 384 |
+
# Collect current frame player boxes
|
| 385 |
+
for idx, box in enumerate(detection_box):
|
| 386 |
+
x1, y1, x2, y2, conf, cls_id = box.tolist()
|
| 387 |
+
if cls_id == 2 and conf < 0.6:
|
| 388 |
+
continue
|
| 389 |
+
mapped_cls_id = str(int(cls_id))
|
| 390 |
+
|
| 391 |
+
if self.team_classifier and self.team_classifier_fitted and mapped_cls_id == '2':
|
| 392 |
+
target_box = (x1, y1, x2, y2)
|
| 393 |
+
|
| 394 |
+
# Try to match with previous predicted frame
|
| 395 |
+
best_team_id = None
|
| 396 |
+
best_iou = 0.0
|
| 397 |
+
|
| 398 |
+
if prev_predicted_frame in predicted_frame_data:
|
| 399 |
+
team_id, iou = self._find_best_match(
|
| 400 |
+
target_box,
|
| 401 |
+
predicted_frame_data[prev_predicted_frame],
|
| 402 |
+
iou_threshold
|
| 403 |
+
)
|
| 404 |
+
if team_id is not None:
|
| 405 |
+
best_team_id = team_id
|
| 406 |
+
best_iou = iou
|
| 407 |
+
|
| 408 |
+
# Try to match with next predicted frame if available and no good match yet
|
| 409 |
+
if best_team_id is None and next_predicted_frame < len(detection_results):
|
| 410 |
+
if next_predicted_frame in predicted_frame_data:
|
| 411 |
+
team_id, iou = self._find_best_match(
|
| 412 |
+
target_box,
|
| 413 |
+
predicted_frame_data[next_predicted_frame],
|
| 414 |
+
iou_threshold
|
| 415 |
+
)
|
| 416 |
+
if team_id is not None and iou > best_iou:
|
| 417 |
+
best_team_id = team_id
|
| 418 |
+
best_iou = iou
|
| 419 |
+
|
| 420 |
+
# Track interpolation success
|
| 421 |
+
if best_team_id is not None:
|
| 422 |
+
interpolated_count += 1
|
| 423 |
+
else:
|
| 424 |
+
# Fallback: if no match found, predict individually
|
| 425 |
+
crop = frame_image[int(y1):int(y2), int(x1):int(x2)]
|
| 426 |
+
if crop.size > 0:
|
| 427 |
+
team_id = self.team_classifier.predict([crop])[0]
|
| 428 |
+
best_team_id = str(6 + int(team_id))
|
| 429 |
+
fallback_count += 1
|
| 430 |
+
|
| 431 |
+
if best_team_id is not None:
|
| 432 |
+
team_predictions[idx] = best_team_id
|
| 433 |
+
|
| 434 |
+
# Parse boxes with team classification
|
| 435 |
+
for idx, box in enumerate(detection_box):
|
| 436 |
+
x1, y1, x2, y2, conf, cls_id = box.tolist()
|
| 437 |
+
if cls_id == 2 and conf < 0.6:
|
| 438 |
+
continue
|
| 439 |
+
|
| 440 |
+
# Check overlap with staff box
|
| 441 |
+
overlap_staff = False
|
| 442 |
+
for idy, boxy in enumerate(detection_box):
|
| 443 |
+
s_x1, s_y1, s_x2, s_y2, s_conf, s_cls_id = boxy.tolist()
|
| 444 |
+
if cls_id == 2 and s_cls_id == 4:
|
| 445 |
+
staff_iou = self._calculate_iou(box[:4], boxy[:4])
|
| 446 |
+
if staff_iou >= 0.8:
|
| 447 |
+
overlap_staff = True
|
| 448 |
+
break
|
| 449 |
+
if overlap_staff:
|
| 450 |
+
continue
|
| 451 |
+
|
| 452 |
+
mapped_cls_id = str(int(cls_id))
|
| 453 |
+
|
| 454 |
+
# Override cls_id for players with team prediction
|
| 455 |
+
if idx in team_predictions:
|
| 456 |
+
mapped_cls_id = team_predictions[idx]
|
| 457 |
+
if mapped_cls_id != '4':
|
| 458 |
+
if int(mapped_cls_id) == 3 and conf < 0.5:
|
| 459 |
+
continue
|
| 460 |
+
boxes.append(
|
| 461 |
+
BoundingBox(
|
| 462 |
+
x1=int(x1),
|
| 463 |
+
y1=int(y1),
|
| 464 |
+
x2=int(x2),
|
| 465 |
+
y2=int(y2),
|
| 466 |
+
cls_id=int(mapped_cls_id),
|
| 467 |
+
conf=float(conf),
|
| 468 |
+
)
|
| 469 |
+
)
|
| 470 |
+
# Handle footballs - keep only the best one
|
| 471 |
+
footballs = [bb for bb in boxes if int(bb.cls_id) == 0]
|
| 472 |
+
if len(footballs) > 1:
|
| 473 |
+
best_ball = max(footballs, key=lambda b: b.conf)
|
| 474 |
+
boxes = [bb for bb in boxes if int(bb.cls_id) != 0]
|
| 475 |
+
boxes.append(best_ball)
|
| 476 |
+
|
| 477 |
+
bboxes[offset + frame_id] = boxes
|
| 478 |
+
return bboxes
|
| 479 |
+
|
| 480 |
+
|
| 481 |
+
def predict_batch(self, batch_images: List[ndarray], offset: int, n_keypoints: int) -> List[TVFrameResult]:
|
| 482 |
+
start = time.time()
|
| 483 |
+
detection_results = self._detect_objects_batch(batch_images)
|
| 484 |
+
end = time.time()
|
| 485 |
+
print(f"Detection time: {end - start}")
|
| 486 |
+
|
| 487 |
+
# Use hybrid team classification
|
| 488 |
+
start = time.time()
|
| 489 |
+
bboxes = self._team_classify(detection_results, batch_images, offset)
|
| 490 |
+
end = time.time()
|
| 491 |
+
print(f"Team classify time: {end - start}")
|
| 492 |
+
|
| 493 |
+
# Phase 3: Keypoint Detection
|
| 494 |
+
start = time.time()
|
| 495 |
+
keypoints_yolo: Dict[int, List[Tuple[int, int]]] = {}
|
| 496 |
+
|
| 497 |
+
keypoints_yolo = self._detect_keypoints_batch(batch_images, offset, n_keypoints)
|
| 498 |
+
|
| 499 |
+
|
| 500 |
+
pitch_batch_size = min(self.pitch_batch_size, len(batch_images))
|
| 501 |
+
keypoints: Dict[int, List[Tuple[int, int]]] = {}
|
| 502 |
+
|
| 503 |
+
start = time.time()
|
| 504 |
+
|
| 505 |
+
while True:
|
| 506 |
+
gc.collect()
|
| 507 |
+
if torch.cuda.is_available():
|
| 508 |
+
torch.cuda.empty_cache()
|
| 509 |
+
torch.cuda.synchronize()
|
| 510 |
+
device_str = "cuda"
|
| 511 |
+
keypoints_result = process_batch_input(
|
| 512 |
+
batch_images,
|
| 513 |
+
self.keypoints_model,
|
| 514 |
+
self.kp_threshold,
|
| 515 |
+
device_str,
|
| 516 |
+
batch_size=pitch_batch_size,
|
| 517 |
+
)
|
| 518 |
+
if keypoints_result is not None and len(keypoints_result) > 0:
|
| 519 |
+
for frame_number_in_batch, kp_dict in enumerate(keypoints_result):
|
| 520 |
+
if frame_number_in_batch >= len(batch_images):
|
| 521 |
+
break
|
| 522 |
+
frame_keypoints: List[Tuple[int, int]] = []
|
| 523 |
+
try:
|
| 524 |
+
height, width = batch_images[frame_number_in_batch].shape[:2]
|
| 525 |
+
if kp_dict is not None and isinstance(kp_dict, dict):
|
| 526 |
+
for idx in range(32):
|
| 527 |
+
x, y = 0, 0
|
| 528 |
+
kp_idx = idx + 1
|
| 529 |
+
if kp_idx in kp_dict:
|
| 530 |
+
try:
|
| 531 |
+
kp_data = kp_dict[kp_idx]
|
| 532 |
+
if isinstance(kp_data, dict) and "x" in kp_data and "y" in kp_data:
|
| 533 |
+
x = int(kp_data["x"] * width)
|
| 534 |
+
y = int(kp_data["y"] * height)
|
| 535 |
+
except (KeyError, TypeError, ValueError):
|
| 536 |
+
pass
|
| 537 |
+
frame_keypoints.append((x, y))
|
| 538 |
+
except (IndexError, ValueError, AttributeError):
|
| 539 |
+
frame_keypoints = [(0, 0)] * 32
|
| 540 |
+
if len(frame_keypoints) < n_keypoints:
|
| 541 |
+
frame_keypoints.extend([(0, 0)] * (n_keypoints - len(frame_keypoints)))
|
| 542 |
+
else:
|
| 543 |
+
frame_keypoints = frame_keypoints[:n_keypoints]
|
| 544 |
+
|
| 545 |
+
# time1 = time.time()
|
| 546 |
+
# frame_keypoints_yolo = keypoints_yolo.get(offset + frame_number_in_batch, frame_keypoints)
|
| 547 |
+
|
| 548 |
+
# valid_keypoints_count = 0
|
| 549 |
+
# valid_keypoints_yolo_count = 0
|
| 550 |
+
# for kp in frame_keypoints:
|
| 551 |
+
# if kp[0] != 0.0 or kp[1] != 0.0:
|
| 552 |
+
# valid_keypoints_count += 1
|
| 553 |
+
# if valid_keypoints_count > 3:
|
| 554 |
+
# break
|
| 555 |
+
|
| 556 |
+
# for kp in frame_keypoints_yolo:
|
| 557 |
+
# if kp[0] != 0.0 or kp[1] != 0.0:
|
| 558 |
+
# valid_keypoints_yolo_count += 1
|
| 559 |
+
# if valid_keypoints_yolo_count > 3:
|
| 560 |
+
# break
|
| 561 |
+
|
| 562 |
+
# # Evaluate and select best keypoints (using batch evaluation for speed)
|
| 563 |
+
# if valid_keypoints_count > 3 and valid_keypoints_yolo_count > 3:
|
| 564 |
+
# try:
|
| 565 |
+
# last_valid_keypoints = keypoints.get(offset + frame_number_in_batch - 1, frame_keypoints)
|
| 566 |
+
# # Evaluate both keypoint sets in batch (much faster!)
|
| 567 |
+
# scores = evaluate_keypoints_batch_for_frame(
|
| 568 |
+
# template_keypoints=self.template_keypoints,
|
| 569 |
+
# frame_keypoints_list=[frame_keypoints, frame_keypoints_yolo, last_valid_keypoints],
|
| 570 |
+
# frame=batch_images[frame_number_in_batch],
|
| 571 |
+
# floor_markings_template=self.template_image,
|
| 572 |
+
# device="cuda"
|
| 573 |
+
# )
|
| 574 |
+
# score = scores[0]
|
| 575 |
+
# score_yolo = scores[1]
|
| 576 |
+
# last_score = scores[2]
|
| 577 |
+
|
| 578 |
+
# if last_score > score and last_score > score_yolo:
|
| 579 |
+
# frame_keypoints = last_valid_keypoints
|
| 580 |
+
# if score_yolo > score:
|
| 581 |
+
# frame_keypoints = frame_keypoints_yolo
|
| 582 |
+
# last_score = score_yolo
|
| 583 |
+
# else:
|
| 584 |
+
# last_score = score
|
| 585 |
+
|
| 586 |
+
# last_valid_keypoints = frame_keypoints
|
| 587 |
+
|
| 588 |
+
# except Exception as e:
|
| 589 |
+
# # Fallback: use YOLO if available, otherwise use pitch model
|
| 590 |
+
# if valid_keypoints_yolo_count > 3:
|
| 591 |
+
# frame_keypoints = frame_keypoints_yolo
|
| 592 |
+
# elif valid_keypoints_yolo_count > 3:
|
| 593 |
+
# # Only YOLO has valid keypoints
|
| 594 |
+
# frame_keypoints = frame_keypoints_yolo
|
| 595 |
+
# else:
|
| 596 |
+
# if last_valid_keypoints is not None:
|
| 597 |
+
# frame_keypoints = last_valid_keypoints
|
| 598 |
+
|
| 599 |
+
# time2 = time.time()
|
| 600 |
+
# print(f"Keypoint evaluation time: {time2 - time1}")
|
| 601 |
+
|
| 602 |
+
keypoints[offset + frame_number_in_batch] = frame_keypoints
|
| 603 |
+
break
|
| 604 |
+
end = time.time()
|
| 605 |
+
print(f"Keypoint time: {end - start}")
|
| 606 |
+
|
| 607 |
+
results: List[TVFrameResult] = []
|
| 608 |
+
for frame_number in range(offset, offset + len(batch_images)):
|
| 609 |
+
frame_boxes = bboxes.get(frame_number, [])
|
| 610 |
+
result = TVFrameResult(
|
| 611 |
+
frame_id=frame_number,
|
| 612 |
+
boxes=frame_boxes,
|
| 613 |
+
keypoints=keypoints.get(
|
| 614 |
+
frame_number,
|
| 615 |
+
[(0, 0) for _ in range(n_keypoints)],
|
| 616 |
+
),
|
| 617 |
+
)
|
| 618 |
+
results.append(result)
|
| 619 |
+
|
| 620 |
+
results_yolo: List[TVFrameResult] = []
|
| 621 |
+
for frame_number in range(offset, offset + len(batch_images)):
|
| 622 |
+
frame_boxes = bboxes.get(frame_number, [])
|
| 623 |
+
result = TVFrameResult(
|
| 624 |
+
frame_id=frame_number,
|
| 625 |
+
boxes=frame_boxes,
|
| 626 |
+
keypoints=keypoints_yolo.get(
|
| 627 |
+
frame_number,
|
| 628 |
+
[(0, 0) for _ in range(n_keypoints)],
|
| 629 |
+
),
|
| 630 |
+
)
|
| 631 |
+
results_yolo.append(result)
|
| 632 |
+
|
| 633 |
+
start = time.time()
|
| 634 |
+
if len(batch_images) > 0:
|
| 635 |
+
h, w = batch_images[0].shape[:2]
|
| 636 |
+
results = run_keypoints_post_processing_v2(
|
| 637 |
+
results, w, h,
|
| 638 |
+
frames=batch_images,
|
| 639 |
+
template_keypoints=self.template_keypoints,
|
| 640 |
+
floor_markings_template=self.template_image,
|
| 641 |
+
offset=offset
|
| 642 |
+
)
|
| 643 |
+
results_yolo = run_keypoints_post_processing_v2(
|
| 644 |
+
results_yolo, w, h,
|
| 645 |
+
frames=batch_images,
|
| 646 |
+
template_keypoints=self.template_keypoints,
|
| 647 |
+
floor_markings_template=self.template_image,
|
| 648 |
+
offset=offset
|
| 649 |
+
)
|
| 650 |
+
end = time.time()
|
| 651 |
+
print(f"Keypoint post processing time: {end - start}")
|
| 652 |
+
|
| 653 |
+
final_keypoints: Dict[int, List[Tuple[int, int]]] = {}
|
| 654 |
+
|
| 655 |
+
for frame_number_in_batch, (result, result_yolo) in enumerate(zip(results, results_yolo)):
|
| 656 |
+
frame_keypoints = result.keypoints
|
| 657 |
+
try:
|
| 658 |
+
if self.last_valid_keypoints is None:
|
| 659 |
+
self.last_valid_keypoints = final_keypoints.get(offset + frame_number_in_batch - 1, self.last_valid_keypoints)
|
| 660 |
+
# Evaluate both keypoint sets in batch (much faster!)
|
| 661 |
+
scores = evaluate_keypoints_batch_for_frame(
|
| 662 |
+
template_keypoints=self.template_keypoints,
|
| 663 |
+
frame_keypoints_list=[result.keypoints, result_yolo.keypoints, self.last_valid_keypoints],
|
| 664 |
+
frame=batch_images[frame_number_in_batch],
|
| 665 |
+
floor_markings_template=self.template_image,
|
| 666 |
+
device="cuda"
|
| 667 |
+
)
|
| 668 |
+
score = scores[0]
|
| 669 |
+
score_yolo = scores[1]
|
| 670 |
+
self.last_score = scores[2]
|
| 671 |
+
|
| 672 |
+
if self.last_score > score and self.last_score > score_yolo:
|
| 673 |
+
frame_keypoints = self.last_valid_keypoints
|
| 674 |
+
elif score_yolo > score:
|
| 675 |
+
frame_keypoints = result_yolo.keypoints
|
| 676 |
+
self.last_score = score_yolo
|
| 677 |
+
else:
|
| 678 |
+
self.last_score = score
|
| 679 |
+
|
| 680 |
+
|
| 681 |
+
except Exception as e:
|
| 682 |
+
# Evaluation failed: keep the current post-processed keypoints for this frame and log the error
|
| 683 |
+
print('Error: ', e)
|
| 684 |
+
|
| 685 |
+
self.last_valid_keypoints = frame_keypoints
|
| 686 |
+
|
| 687 |
+
final_keypoints[offset + frame_number_in_batch] = frame_keypoints
|
| 688 |
+
|
| 689 |
+
|
| 690 |
+
final_results: List[TVFrameResult] = []
|
| 691 |
+
for frame_number in range(offset, offset + len(batch_images)):
|
| 692 |
+
frame_boxes = bboxes.get(frame_number, [])
|
| 693 |
+
result = TVFrameResult(
|
| 694 |
+
frame_id=frame_number,
|
| 695 |
+
boxes=frame_boxes,
|
| 696 |
+
keypoints=final_keypoints.get(
|
| 697 |
+
frame_number,
|
| 698 |
+
[(0, 0) for _ in range(n_keypoints)],
|
| 699 |
+
),
|
| 700 |
+
)
|
| 701 |
+
final_results.append(result)
|
| 702 |
+
|
| 703 |
+
|
| 704 |
+
gc.collect()
|
| 705 |
+
if torch.cuda.is_available():
|
| 706 |
+
torch.cuda.empty_cache()
|
| 707 |
+
torch.cuda.synchronize()
|
| 708 |
+
|
| 709 |
+
return final_results
|
| 710 |
+
|
| 711 |
+
def _detect_keypoints_batch(self, batch_images: List[ndarray],
|
| 712 |
+
offset: int, n_keypoints: int) -> Dict[int, List[Tuple[int, int]]]:
|
| 713 |
+
"""
|
| 714 |
+
Phase 3: Keypoint detection for all frames in batch.
|
| 715 |
+
|
| 716 |
+
Args:
|
| 717 |
+
batch_images: List of images to process
|
| 718 |
+
offset: Frame offset for numbering
|
| 719 |
+
n_keypoints: Number of keypoints expected
|
| 720 |
+
|
| 721 |
+
Returns:
|
| 722 |
+
Dictionary mapping frame_id to list of keypoint coordinates
|
| 723 |
+
"""
|
| 724 |
+
keypoints: Dict[int, List[Tuple[int, int]]] = {}
|
| 725 |
+
keypoints_model_results = self.keypoints_model_yolo.predict(batch_images)
|
| 726 |
+
|
| 727 |
+
if keypoints_model_results is None:
|
| 728 |
+
return keypoints
|
| 729 |
+
|
| 730 |
+
for frame_idx_in_batch, detection in enumerate(keypoints_model_results):
|
| 731 |
+
if not hasattr(detection, "keypoints") or detection.keypoints is None:
|
| 732 |
+
continue
|
| 733 |
+
|
| 734 |
+
# Extract keypoints with confidence
|
| 735 |
+
frame_keypoints_with_conf: List[Tuple[int, int, float]] = []
|
| 736 |
+
for i, part_points in enumerate(detection.keypoints.data):
|
| 737 |
+
for k_id, (x, y, _) in enumerate(part_points):
|
| 738 |
+
confidence = float(detection.keypoints.conf[i][k_id])
|
| 739 |
+
frame_keypoints_with_conf.append((int(x), int(y), confidence))
|
| 740 |
+
|
| 741 |
+
# Pad or truncate to expected number of keypoints
|
| 742 |
+
if len(frame_keypoints_with_conf) < n_keypoints:
|
| 743 |
+
frame_keypoints_with_conf.extend(
|
| 744 |
+
[(0, 0, 0.0)] * (n_keypoints - len(frame_keypoints_with_conf))
|
| 745 |
+
)
|
| 746 |
+
else:
|
| 747 |
+
frame_keypoints_with_conf = frame_keypoints_with_conf[:n_keypoints]
|
| 748 |
+
|
| 749 |
+
# Filter keypoints based on confidence thresholds
|
| 750 |
+
filtered_keypoints: List[Tuple[int, int]] = []
|
| 751 |
+
for idx, (x, y, confidence) in enumerate(frame_keypoints_with_conf):
|
| 752 |
+
if idx in self.CORNER_INDICES:
|
| 753 |
+
# Corner keypoints have lower confidence threshold
|
| 754 |
+
if confidence < 0.3:
|
| 755 |
+
filtered_keypoints.append((0, 0))
|
| 756 |
+
else:
|
| 757 |
+
filtered_keypoints.append((int(x), int(y)))
|
| 758 |
+
else:
|
| 759 |
+
# Regular keypoints
|
| 760 |
+
if confidence < 0.5:
|
| 761 |
+
filtered_keypoints.append((0, 0))
|
| 762 |
+
else:
|
| 763 |
+
filtered_keypoints.append((int(x), int(y)))
|
| 764 |
+
|
| 765 |
+
frame_id = offset + frame_idx_in_batch
|
| 766 |
+
keypoints[frame_id] = filtered_keypoints
|
| 767 |
+
|
| 768 |
+
return keypoints
|
| 769 |
+
|
| 770 |
+
def predict_keypoints(
|
| 771 |
+
self,
|
| 772 |
+
images: List[ndarray],
|
| 773 |
+
n_keypoints: int = 32,
|
| 774 |
+
batch_size: Optional[int] = None,
|
| 775 |
+
conf_threshold: float = 0.5,
|
| 776 |
+
corner_conf_threshold: float = 0.3,
|
| 777 |
+
verbose: bool = False
|
| 778 |
+
) -> Dict[int, List[Tuple[int, int]]]:
|
| 779 |
+
"""
|
| 780 |
+
Standalone function for keypoint detection on a list of images.
|
| 781 |
+
Optimized for maximum prediction speed.
|
| 782 |
+
|
| 783 |
+
Args:
|
| 784 |
+
images: List of images (numpy arrays) to process
|
| 785 |
+
n_keypoints: Number of keypoints expected per frame (default: 32)
|
| 786 |
+
batch_size: Batch size for YOLO prediction (None = auto, uses all images)
|
| 787 |
+
conf_threshold: Confidence threshold for regular keypoints (default: 0.5)
|
| 788 |
+
corner_conf_threshold: Confidence threshold for corner keypoints (default: 0.3)
|
| 789 |
+
verbose: Whether to print progress information
|
| 790 |
+
|
| 791 |
+
Returns:
|
| 792 |
+
Dictionary mapping frame index to list of keypoint coordinates (x, y)
|
| 793 |
+
Frame indices start from 0
|
| 794 |
+
"""
|
| 795 |
+
if not images:
|
| 796 |
+
return {}
|
| 797 |
+
|
| 798 |
+
keypoints: Dict[int, List[Tuple[int, int]]] = {}
|
| 799 |
+
|
| 800 |
+
# Use provided batch_size or process all at once for maximum speed
|
| 801 |
+
if batch_size is None:
|
| 802 |
+
batch_size = len(images)
|
| 803 |
+
|
| 804 |
+
# Process in batches for optimal GPU utilization
|
| 805 |
+
for batch_start in range(0, len(images), batch_size):
|
| 806 |
+
batch_end = min(batch_start + batch_size, len(images))
|
| 807 |
+
batch_images = images[batch_start:batch_end]
|
| 808 |
+
|
| 809 |
+
if verbose:
|
| 810 |
+
print(f"Processing keypoints batch {batch_start}-{batch_end-1} ({len(batch_images)} images)")
|
| 811 |
+
|
| 812 |
+
# YOLO keypoint prediction (optimized batch processing)
|
| 813 |
+
keypoints_model_results = self.keypoints_model_yolo.predict(
|
| 814 |
+
batch_images,
|
| 815 |
+
verbose=False,
|
| 816 |
+
save=False,
|
| 817 |
+
conf=0.1, # Lower conf for detection, we filter later
|
| 818 |
+
)
|
| 819 |
+
|
| 820 |
+
if keypoints_model_results is None:
|
| 821 |
+
# Fill with empty keypoints for this batch
|
| 822 |
+
for frame_idx in range(batch_start, batch_end):
|
| 823 |
+
keypoints[frame_idx] = [(0, 0)] * n_keypoints
|
| 824 |
+
continue
|
| 825 |
+
|
| 826 |
+
# Process each frame in the batch
|
| 827 |
+
for batch_idx, detection in enumerate(keypoints_model_results):
|
| 828 |
+
frame_idx = batch_start + batch_idx
|
| 829 |
+
|
| 830 |
+
if not hasattr(detection, "keypoints") or detection.keypoints is None:
|
| 831 |
+
keypoints[frame_idx] = [(0, 0)] * n_keypoints
|
| 832 |
+
continue
|
| 833 |
+
|
| 834 |
+
# Extract keypoints with confidence
|
| 835 |
+
frame_keypoints_with_conf: List[Tuple[int, int, float]] = []
|
| 836 |
+
try:
|
| 837 |
+
for i, part_points in enumerate(detection.keypoints.data):
|
| 838 |
+
for k_id, (x, y, _) in enumerate(part_points):
|
| 839 |
+
confidence = float(detection.keypoints.conf[i][k_id])
|
| 840 |
+
frame_keypoints_with_conf.append((int(x), int(y), confidence))
|
| 841 |
+
except (AttributeError, IndexError, TypeError):
|
| 842 |
+
keypoints[frame_idx] = [(0, 0)] * n_keypoints
|
| 843 |
+
continue
|
| 844 |
+
|
| 845 |
+
# Pad or truncate to expected number of keypoints
|
| 846 |
+
if len(frame_keypoints_with_conf) < n_keypoints:
|
| 847 |
+
frame_keypoints_with_conf.extend(
|
| 848 |
+
[(0, 0, 0.0)] * (n_keypoints - len(frame_keypoints_with_conf))
|
| 849 |
+
)
|
| 850 |
+
else:
|
| 851 |
+
frame_keypoints_with_conf = frame_keypoints_with_conf[:n_keypoints]
|
| 852 |
+
|
| 853 |
+
# Filter keypoints based on confidence thresholds
|
| 854 |
+
filtered_keypoints: List[Tuple[int, int]] = []
|
| 855 |
+
for idx, (x, y, confidence) in enumerate(frame_keypoints_with_conf):
|
| 856 |
+
if idx in self.CORNER_INDICES:
|
| 857 |
+
# Corner keypoints have lower confidence threshold
|
| 858 |
+
if confidence < corner_conf_threshold:
|
| 859 |
+
filtered_keypoints.append((0, 0))
|
| 860 |
+
else:
|
| 861 |
+
filtered_keypoints.append((int(x), int(y)))
|
| 862 |
+
else:
|
| 863 |
+
# Regular keypoints
|
| 864 |
+
if confidence < conf_threshold:
|
| 865 |
+
filtered_keypoints.append((0, 0))
|
| 866 |
+
else:
|
| 867 |
+
filtered_keypoints.append((int(x), int(y)))
|
| 868 |
+
|
| 869 |
+
keypoints[frame_idx] = filtered_keypoints
|
| 870 |
+
|
| 871 |
+
return keypoints
|
| 872 |
+
|
| 873 |
+
def predict_objects(
|
| 874 |
+
self,
|
| 875 |
+
images: List[ndarray],
|
| 876 |
+
batch_size: Optional[int] = 16,
|
| 877 |
+
conf_threshold: float = 0.5,
|
| 878 |
+
iou_threshold: float = 0.45,
|
| 879 |
+
classes: Optional[List[int]] = None,
|
| 880 |
+
verbose: bool = False,
|
| 881 |
+
) -> Dict[int, List[BoundingBox]]:
|
| 882 |
+
"""
|
| 883 |
+
Standalone high-throughput object detection function.
|
| 884 |
+
Runs the YOLO detector directly on raw images while skipping
|
| 885 |
+
any team-classification or keypoint stages for maximum FPS.
|
| 886 |
+
|
| 887 |
+
Args:
|
| 888 |
+
images: List of frames (BGR numpy arrays).
|
| 889 |
+
batch_size: Number of frames per inference pass. Use None to process
|
| 890 |
+
all frames at once (fastest but highest memory usage).
|
| 891 |
+
conf_threshold: Detection confidence threshold.
|
| 892 |
+
iou_threshold: IoU threshold for NMS within YOLO.
|
| 893 |
+
classes: Optional list of class IDs to keep (None = all classes).
|
| 894 |
+
verbose: Whether to print per-batch progress from YOLO.
|
| 895 |
+
|
| 896 |
+
Returns:
|
| 897 |
+
Dict mapping frame index -> list of BoundingBox predictions.
|
| 898 |
+
"""
|
| 899 |
+
if not images:
|
| 900 |
+
return {}
|
| 901 |
+
|
| 902 |
+
detections: Dict[int, List[BoundingBox]] = {}
|
| 903 |
+
effective_batch = len(images) if batch_size is None else max(1, batch_size)
|
| 904 |
+
|
| 905 |
+
for batch_start in range(0, len(images), effective_batch):
|
| 906 |
+
batch_end = min(batch_start + effective_batch, len(images))
|
| 907 |
+
batch_images = images[batch_start:batch_end]
|
| 908 |
+
|
| 909 |
+
start = time.time()
|
| 910 |
+
yolo_results = self.bbox_model(
|
| 911 |
+
batch_images,
|
| 912 |
+
conf=conf_threshold,
|
| 913 |
+
iou=iou_threshold,
|
| 914 |
+
classes=classes,
|
| 915 |
+
verbose=verbose,
|
| 916 |
+
save=False,
|
| 917 |
+
)
|
| 918 |
+
end = time.time()
|
| 919 |
+
print(f"YOLO time: {end - start}")
|
| 920 |
+
|
| 921 |
+
for local_idx, result in enumerate(yolo_results):
|
| 922 |
+
frame_idx = batch_start + local_idx
|
| 923 |
+
frame_boxes: List[BoundingBox] = []
|
| 924 |
+
|
| 925 |
+
if not hasattr(result, "boxes") or result.boxes is None:
|
| 926 |
+
detections[frame_idx] = frame_boxes
|
| 927 |
+
continue
|
| 928 |
+
|
| 929 |
+
boxes_tensor = result.boxes.data
|
| 930 |
+
if boxes_tensor is None:
|
| 931 |
+
detections[frame_idx] = frame_boxes
|
| 932 |
+
continue
|
| 933 |
+
|
| 934 |
+
for box in boxes_tensor:
|
| 935 |
+
try:
|
| 936 |
+
x1, y1, x2, y2, conf, cls_id = box.tolist()
|
| 937 |
+
frame_boxes.append(
|
| 938 |
+
BoundingBox(
|
| 939 |
+
x1=int(x1),
|
| 940 |
+
y1=int(y1),
|
| 941 |
+
x2=int(x2),
|
| 942 |
+
y2=int(y2),
|
| 943 |
+
cls_id=int(cls_id),
|
| 944 |
+
conf=float(conf),
|
| 945 |
+
)
|
| 946 |
+
)
|
| 947 |
+
except (ValueError, TypeError):
|
| 948 |
+
continue
|
| 949 |
+
|
| 950 |
+
detections[frame_idx] = frame_boxes
|
| 951 |
+
|
| 952 |
+
return detections
|
| 953 |
+
|
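miner2.py exposes three entry points on Miner: predict_batch (detection, team classification and keypoints combined), predict_keypoints, and predict_objects. A hedged usage sketch, assuming the weights listed in this repository sit in the current directory and that a local video file is available (the clip name and batch size are illustrative):

from pathlib import Path
import cv2
from miner2 import Miner

miner = Miner(Path("."))                    # directory containing detection.onnx, keypoint.pt, osnet_model.pth.tar-100, ...
cap = cv2.VideoCapture("match_clip.mp4")    # hypothetical input clip
frames = []
while len(frames) < 16:                     # one small batch of frames
    ok, frame = cap.read()
    if not ok:
        break
    frames.append(frame)
cap.release()

# Full pipeline: bounding boxes with team ids plus 32 pitch keypoints per frame
results = miner.predict_batch(frames, offset=0, n_keypoints=32)
for r in results:
    valid_kps = sum(1 for kp in r.keypoints if kp != (0, 0))
    print(r.frame_id, len(r.boxes), valid_kps)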
miner3.py ADDED
@@ -0,0 +1,952 @@
| 1 |
+
from pathlib import Path
|
| 2 |
+
from typing import List, Tuple, Dict, Optional
|
| 3 |
+
import sys
|
| 4 |
+
import os
|
| 5 |
+
import psutil
|
| 6 |
+
|
| 7 |
+
from numpy import ndarray
|
| 8 |
+
from pydantic import BaseModel
|
| 9 |
+
from multiprocessing import cpu_count
|
| 10 |
+
|
| 11 |
+
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
|
| 12 |
+
from keypoint_helper_v2_optimized import run_keypoints_post_processing
|
| 13 |
+
|
| 14 |
+
from ultralytics import YOLO
|
| 15 |
+
from team_cluster import TeamClassifier
|
| 16 |
+
from utils import (
|
| 17 |
+
BoundingBox,
|
| 18 |
+
Constants,
|
| 19 |
+
)
|
| 20 |
+
|
| 21 |
+
import time
|
| 22 |
+
import torch
|
| 23 |
+
import gc
|
| 24 |
+
import cv2
|
| 25 |
+
import numpy as np
|
| 26 |
+
from collections import defaultdict
|
| 27 |
+
from pitch import process_batch_input, get_cls_net
|
| 28 |
+
from keypoint_evaluation import (
|
| 29 |
+
evaluate_keypoints_for_frame,
|
| 30 |
+
evaluate_keypoints_for_frame_gpu,
|
| 31 |
+
load_template_from_file,
|
| 32 |
+
evaluate_keypoints_for_frame_opencv_cuda,
|
| 33 |
+
evaluate_keypoints_batch_for_frame,
|
| 34 |
+
)
|
| 35 |
+
|
| 36 |
+
import yaml
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
class BoundingBox(BaseModel):
|
| 40 |
+
x1: int
|
| 41 |
+
y1: int
|
| 42 |
+
x2: int
|
| 43 |
+
y2: int
|
| 44 |
+
cls_id: int
|
| 45 |
+
conf: float
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
class TVFrameResult(BaseModel):
|
| 49 |
+
frame_id: int
|
| 50 |
+
boxes: List[BoundingBox]
|
| 51 |
+
keypoints: List[Tuple[int, int]]
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
class Miner:
|
| 55 |
+
SMALL_CONTAINED_IOA = Constants.SMALL_CONTAINED_IOA
|
| 56 |
+
SMALL_RATIO_MAX = Constants.SMALL_RATIO_MAX
|
| 57 |
+
SINGLE_PLAYER_HUE_PIVOT = Constants.SINGLE_PLAYER_HUE_PIVOT
|
| 58 |
+
CORNER_INDICES = Constants.CORNER_INDICES
|
| 59 |
+
KEYPOINTS_CONFIDENCE = Constants.KEYPOINTS_CONFIDENCE + 0.3
|
| 60 |
+
CORNER_CONFIDENCE = Constants.CORNER_CONFIDENCE
|
| 61 |
+
GOALKEEPER_POSITION_MARGIN = Constants.GOALKEEPER_POSITION_MARGIN
|
| 62 |
+
MIN_SAMPLES_FOR_FIT = 16 # Minimum player crops needed before fitting TeamClassifier
|
| 63 |
+
MAX_SAMPLES_FOR_FIT = 1000 # Maximum samples to avoid overfitting
|
| 64 |
+
|
| 65 |
+
def __init__(self, path_hf_repo: Path) -> None:
|
| 66 |
+
try:
|
| 67 |
+
|
| 68 |
+
device = "cuda" if torch.cuda.is_available() else "cpu"
|
| 69 |
+
model_path = path_hf_repo / "detection.onnx"
|
| 70 |
+
self.bbox_model = YOLO(model_path)
|
| 71 |
+
|
| 72 |
+
print(f"BBox Model Loaded: class name {self.bbox_model.names}")
|
| 73 |
+
|
| 74 |
+
team_model_path = path_hf_repo / "osnet_model.pth.tar-100"
|
| 75 |
+
self.team_classifier = TeamClassifier(
|
| 76 |
+
device=device,
|
| 77 |
+
batch_size=32,
|
| 78 |
+
model_name=str(team_model_path)
|
| 79 |
+
)
|
| 80 |
+
print("Team Classifier Loaded")
|
| 81 |
+
|
| 82 |
+
self.last_score = 0
|
| 83 |
+
self.last_valid_keypoints = None
|
| 84 |
+
# Team classification state
|
| 85 |
+
self.team_classifier_fitted = False
|
| 86 |
+
self.player_crops_for_fit = []
|
| 87 |
+
|
| 88 |
+
self.keypoints_model_yolo = YOLO(path_hf_repo / "keypoint.pt")
|
| 89 |
+
|
| 90 |
+
model_kp_path = path_hf_repo / 'keypoint'
|
| 91 |
+
config_kp_path = path_hf_repo / 'hrnetv2_w48.yaml'
|
| 92 |
+
cfg_kp = yaml.safe_load(open(config_kp_path, 'r'))
|
| 93 |
+
|
| 94 |
+
loaded_state_kp = torch.load(model_kp_path, map_location=device)
|
| 95 |
+
model = get_cls_net(cfg_kp)
|
| 96 |
+
model.load_state_dict(loaded_state_kp)
|
| 97 |
+
model.to(device)
|
| 98 |
+
model.eval()
|
| 99 |
+
|
| 100 |
+
self.keypoints_model = model
|
| 101 |
+
print("Keypoints Model (keypoint.pt) Loaded")
|
| 102 |
+
|
| 103 |
+
template_image_path = path_hf_repo / "football_pitch_template.png"
|
| 104 |
+
self.template_image, self.template_keypoints = load_template_from_file(str(template_image_path))
|
| 105 |
+
|
| 106 |
+
self.kp_threshold = 0.3
|
| 107 |
+
self.pitch_batch_size = 4
|
| 108 |
+
self.health = "healthy"
|
| 109 |
+
|
| 110 |
+
print("✅ Keypoints Model Loaded")
|
| 111 |
+
except Exception as e:
|
| 112 |
+
self.health = "❌ Miner initialization failed: " + str(e)
|
| 113 |
+
print(self.health)
|
| 114 |
+
|
| 115 |
+
def __repr__(self) -> str:
|
| 116 |
+
if self.health == 'healthy':
|
| 117 |
+
return (
|
| 118 |
+
f"health: {self.health}\n"
|
| 119 |
+
f"BBox Model: {type(self.bbox_model).__name__}\n"
|
| 120 |
+
f"Keypoints Model: {type(self.keypoints_model).__name__}"
|
| 121 |
+
f"CPU Count: {cpu_count()}\n"
|
| 122 |
+
f"CPU Speed: {psutil.cpu_freq().current/1000:.2f} GHz"
|
| 123 |
+
)
|
| 124 |
+
else:
|
| 125 |
+
return self.health
|
| 126 |
+
|
| 127 |
+
def _calculate_iou(self, box1: Tuple[float, float, float, float],
|
| 128 |
+
box2: Tuple[float, float, float, float]) -> float:
|
| 129 |
+
"""
|
| 130 |
+
Calculate Intersection over Union (IoU) between two bounding boxes.
|
| 131 |
+
Args:
|
| 132 |
+
box1: (x1, y1, x2, y2)
|
| 133 |
+
box2: (x1, y1, x2, y2)
|
| 134 |
+
Returns:
|
| 135 |
+
IoU score (0-1)
|
| 136 |
+
"""
|
| 137 |
+
x1_1, y1_1, x2_1, y2_1 = box1
|
| 138 |
+
x1_2, y1_2, x2_2, y2_2 = box2
|
| 139 |
+
|
| 140 |
+
# Calculate intersection area
|
| 141 |
+
x_left = max(x1_1, x1_2)
|
| 142 |
+
y_top = max(y1_1, y1_2)
|
| 143 |
+
x_right = min(x2_1, x2_2)
|
| 144 |
+
y_bottom = min(y2_1, y2_2)
|
| 145 |
+
|
| 146 |
+
if x_right < x_left or y_bottom < y_top:
|
| 147 |
+
return 0.0
|
| 148 |
+
|
| 149 |
+
intersection_area = (x_right - x_left) * (y_bottom - y_top)
|
| 150 |
+
|
| 151 |
+
# Calculate union area
|
| 152 |
+
box1_area = (x2_1 - x1_1) * (y2_1 - y1_1)
|
| 153 |
+
box2_area = (x2_2 - x1_2) * (y2_2 - y1_2)
|
| 154 |
+
union_area = box1_area + box2_area - intersection_area
|
| 155 |
+
|
| 156 |
+
if union_area == 0:
|
| 157 |
+
return 0.0
|
| 158 |
+
|
| 159 |
+
return intersection_area / union_area
|
| 160 |
+
|
| 161 |
+
def _extract_jersey_region(self, crop: ndarray) -> ndarray:
|
| 162 |
+
"""
|
| 163 |
+
Extract jersey region (upper body) from player crop.
|
| 164 |
+
For close-ups, focuses on upper 60%, for distant shots uses full crop.
|
| 165 |
+
"""
|
| 166 |
+
if crop is None or crop.size == 0:
|
| 167 |
+
return crop
|
| 168 |
+
|
| 169 |
+
h, w = crop.shape[:2]
|
| 170 |
+
if h < 10 or w < 10:
|
| 171 |
+
return crop
|
| 172 |
+
|
| 173 |
+
# For close-up shots, extract upper body (jersey region)
|
| 174 |
+
is_closeup = h > 100 or (h * w) > 12000
|
| 175 |
+
if is_closeup:
|
| 176 |
+
# Upper 60% of the crop (jersey area, avoiding shorts)
|
| 177 |
+
jersey_top = 0
|
| 178 |
+
jersey_bottom = int(h * 0.60)
|
| 179 |
+
jersey_left = max(0, int(w * 0.05))
|
| 180 |
+
jersey_right = min(w, int(w * 0.95))
|
| 181 |
+
return crop[jersey_top:jersey_bottom, jersey_left:jersey_right]
|
| 182 |
+
return crop
|
| 183 |
+
|
| 184 |
+
def _extract_color_signature(self, crop: ndarray) -> Optional[np.ndarray]:
|
| 185 |
+
"""
|
| 186 |
+
Extract color signature from jersey region using HSV and LAB color spaces.
|
| 187 |
+
Returns a feature vector with dominant colors and color statistics.
|
| 188 |
+
"""
|
| 189 |
+
if crop is None or crop.size == 0:
|
| 190 |
+
return None
|
| 191 |
+
|
| 192 |
+
jersey_region = self._extract_jersey_region(crop)
|
| 193 |
+
if jersey_region.size == 0:
|
| 194 |
+
return None
|
| 195 |
+
|
| 196 |
+
try:
|
| 197 |
+
# Convert to HSV and LAB color spaces
|
| 198 |
+
hsv = cv2.cvtColor(jersey_region, cv2.COLOR_BGR2HSV)
|
| 199 |
+
lab = cv2.cvtColor(jersey_region, cv2.COLOR_BGR2LAB)
|
| 200 |
+
|
| 201 |
+
# Reshape for processing
|
| 202 |
+
hsv_flat = hsv.reshape(-1, 3).astype(np.float32)
|
| 203 |
+
lab_flat = lab.reshape(-1, 3).astype(np.float32)
|
| 204 |
+
|
| 205 |
+
# Compute statistics for HSV
|
| 206 |
+
hsv_mean = np.mean(hsv_flat, axis=0) / 255.0
|
| 207 |
+
hsv_std = np.std(hsv_flat, axis=0) / 255.0
|
| 208 |
+
|
| 209 |
+
# Compute statistics for LAB
|
| 210 |
+
lab_mean = np.mean(lab_flat, axis=0) / 255.0
|
| 211 |
+
lab_std = np.std(lab_flat, axis=0) / 255.0
|
| 212 |
+
|
| 213 |
+
# Dominant color (most frequent hue)
|
| 214 |
+
hue_hist, _ = np.histogram(hsv_flat[:, 0], bins=36, range=(0, 180))
|
| 215 |
+
dominant_hue = np.argmax(hue_hist) * 5 # Convert to hue value
|
| 216 |
+
|
| 217 |
+
# Combine features
|
| 218 |
+
color_features = np.concatenate([
|
| 219 |
+
hsv_mean,
|
| 220 |
+
hsv_std,
|
| 221 |
+
lab_mean[:2], # L and A channels (B is less informative)
|
| 222 |
+
lab_std[:2],
|
| 223 |
+
[dominant_hue / 180.0] # Normalized dominant hue
|
| 224 |
+
])
|
| 225 |
+
|
| 226 |
+
return color_features
|
| 227 |
+
except Exception as e:
|
| 228 |
+
print(f"Error extracting color signature: {e}")
|
| 229 |
+
return None
|
| 230 |
+
|
| 231 |
+
def _get_spatial_position(self, bbox: Tuple[float, float, float, float],
|
| 232 |
+
frame_width: int, frame_height: int) -> Tuple[float, float]:
|
| 233 |
+
"""
|
| 234 |
+
Get normalized spatial position of player on the pitch.
|
| 235 |
+
Returns (x_normalized, y_normalized) where 0,0 is top-left.
|
| 236 |
+
"""
|
| 237 |
+
x1, y1, x2, y2 = bbox
|
| 238 |
+
center_x = (x1 + x2) / 2.0
|
| 239 |
+
center_y = (y1 + y2) / 2.0
|
| 240 |
+
|
| 241 |
+
# Normalize to [0, 1]
|
| 242 |
+
x_norm = center_x / frame_width if frame_width > 0 else 0.5
|
| 243 |
+
y_norm = center_y / frame_height if frame_height > 0 else 0.5
|
| 244 |
+
|
| 245 |
+
return (x_norm, y_norm)
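# Example: bbox (100, 200, 140, 280) in a 1920x1080 frame has centre
# (120, 240), giving a normalised position of (0.0625, ~0.222).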
|
| 246 |
+
|
| 247 |
+
def _find_best_match(self, target_box: Tuple[float, float, float, float],
|
| 248 |
+
predicted_frame_data: Dict[int, Tuple[Tuple, str]],
|
| 249 |
+
iou_threshold: float) -> Tuple[Optional[str], float]:
|
| 250 |
+
"""
|
| 251 |
+
Find best matching box in predicted frame data using IoU.
|
| 252 |
+
Optimized with vectorized calculations when possible.
|
| 253 |
+
"""
|
| 254 |
+
if len(predicted_frame_data) == 0:
|
| 255 |
+
return (None, 0.0)
|
| 256 |
+
|
| 257 |
+
# Vectorized IoU calculation for better performance
|
| 258 |
+
target_array = np.array(target_box, dtype=np.float32)
|
| 259 |
+
bboxes_array = np.array([bbox for bbox, _ in predicted_frame_data.values()], dtype=np.float32)
|
| 260 |
+
team_ids = [team_cls_id for _, team_cls_id in predicted_frame_data.values()]
|
| 261 |
+
|
| 262 |
+
# Calculate IoU for all boxes at once using vectorization
|
| 263 |
+
# Extract coordinates
|
| 264 |
+
t_x1, t_y1, t_x2, t_y2 = target_array
|
| 265 |
+
b_x1 = bboxes_array[:, 0]
|
| 266 |
+
b_y1 = bboxes_array[:, 1]
|
| 267 |
+
b_x2 = bboxes_array[:, 2]
|
| 268 |
+
b_y2 = bboxes_array[:, 3]
|
| 269 |
+
|
| 270 |
+
# Calculate intersection
|
| 271 |
+
x_left = np.maximum(t_x1, b_x1)
|
| 272 |
+
y_top = np.maximum(t_y1, b_y1)
|
| 273 |
+
x_right = np.minimum(t_x2, b_x2)
|
| 274 |
+
y_bottom = np.minimum(t_y2, b_y2)
|
| 275 |
+
|
| 276 |
+
# Intersection area
|
| 277 |
+
intersection = np.maximum(0, x_right - x_left) * np.maximum(0, y_bottom - y_top)
|
| 278 |
+
|
| 279 |
+
# Union area
|
| 280 |
+
target_area = (t_x2 - t_x1) * (t_y2 - t_y1)
|
| 281 |
+
bbox_areas = (b_x2 - b_x1) * (b_y2 - b_y1)
|
| 282 |
+
union = target_area + bbox_areas - intersection
|
| 283 |
+
|
| 284 |
+
# IoU (avoid division by zero)
|
| 285 |
+
ious = np.where(union > 0, intersection / union, 0.0)
|
| 286 |
+
|
| 287 |
+
# Find best match above threshold
|
| 288 |
+
valid_mask = ious >= iou_threshold
|
| 289 |
+
if np.any(valid_mask):
|
| 290 |
+
best_idx = np.argmax(ious)
|
| 291 |
+
if ious[best_idx] >= iou_threshold:
|
| 292 |
+
return (team_ids[best_idx], float(ious[best_idx]))
|
| 293 |
+
|
| 294 |
+
return (None, 0.0)
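# Minimal usage sketch with hypothetical data: predicted_frame_data maps a
# detection index to its (bbox, team_cls_id) pair from an already-predicted frame.
#   frame_data = {0: ((10, 10, 50, 90), '6'), 1: ((200, 15, 240, 95), '7')}
#   team_id, iou = self._find_best_match((12, 12, 52, 92), frame_data, 0.3)
#   # -> ('6', <iou>), or (None, 0.0) when nothing overlaps enough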
|
| 295 |
+
|
| 296 |
+
def _detect_objects_batch(self, decoded_images: List[ndarray]) -> list:  # returns one raw YOLO result per frame
|
| 297 |
+
batch_size = 16
|
| 298 |
+
detection_results = []
|
| 299 |
+
n_frames = len(decoded_images)
|
| 300 |
+
for frame_number in range(0, n_frames, batch_size):
|
| 301 |
+
batch_images = decoded_images[frame_number: frame_number + batch_size]
|
| 302 |
+
detections = self.bbox_model(batch_images, verbose=False, save=False)
|
| 303 |
+
detection_results.extend(detections)
|
| 304 |
+
|
| 305 |
+
return detection_results
|
| 306 |
+
|
| 307 |
+
def _team_classify(self, detection_results, decoded_images, offset):
|
| 308 |
+
self.team_classifier_fitted = False
|
| 309 |
+
start = time.time()
|
| 310 |
+
# Collect player crops from first batch for fitting
|
| 311 |
+
fit_sample_size = 1000
|
| 312 |
+
player_crops_for_fit = []
|
| 313 |
+
|
| 314 |
+
for frame_id in range(len(detection_results)):
|
| 315 |
+
detection_box = detection_results[frame_id].boxes.data
|
| 316 |
+
if len(detection_box) < 4:
|
| 317 |
+
continue
|
| 318 |
+
# Collect player boxes for team classification fitting (first batch only)
|
| 319 |
+
if len(player_crops_for_fit) < fit_sample_size:
|
| 320 |
+
frame_image = decoded_images[frame_id]
|
| 321 |
+
for box in detection_box:
|
| 322 |
+
x1, y1, x2, y2, conf, cls_id = box.tolist()
|
| 323 |
+
if conf < 0.5:
|
| 324 |
+
continue
|
| 325 |
+
mapped_cls_id = str(int(cls_id))
|
| 326 |
+
# Only collect player crops (cls_id = 2)
|
| 327 |
+
if mapped_cls_id == '2':
|
| 328 |
+
crop = frame_image[int(y1):int(y2), int(x1):int(x2)]
|
| 329 |
+
if crop.size > 0:
|
| 330 |
+
player_crops_for_fit.append(crop)
|
| 331 |
+
|
| 332 |
+
# Fit team classifier after collecting samples
|
| 333 |
+
if self.team_classifier and not self.team_classifier_fitted and len(player_crops_for_fit) >= fit_sample_size:
|
| 334 |
+
print(f"Fitting TeamClassifier with {len(player_crops_for_fit)} player crops")
|
| 335 |
+
self.team_classifier.fit(player_crops_for_fit)
|
| 336 |
+
self.team_classifier_fitted = True
|
| 337 |
+
break
|
| 338 |
+
if not self.team_classifier_fitted and len(player_crops_for_fit) >= 16:
|
| 339 |
+
print(f"Fallback: Fitting TeamClassifier with {len(player_crops_for_fit)} player crops")
|
| 340 |
+
self.team_classifier.fit(player_crops_for_fit)
|
| 341 |
+
self.team_classifier_fitted = True
|
| 342 |
+
end = time.time()
|
| 343 |
+
print(f"Fitting Kmeans time: {end - start}")
|
| 344 |
+
|
| 345 |
+
# Second pass: predict teams with configurable frame skipping optimization
|
| 346 |
+
start = time.time()
|
| 347 |
+
|
| 348 |
+
# Get configuration for frame skipping
|
| 349 |
+
prediction_interval = 1  # 1 = run the team classifier on every frame; values > 1 skip frames and interpolate via IoU matching
|
| 350 |
+
iou_threshold = 0.3
|
| 351 |
+
|
| 352 |
+
print(f"Team classification - prediction_interval: {prediction_interval}, iou_threshold: {iou_threshold}")
|
| 353 |
+
|
| 354 |
+
# Storage for predicted frame results: {frame_id: {box_idx: (bbox, team_id)}}
|
| 355 |
+
predicted_frame_data = {}
|
| 356 |
+
|
| 357 |
+
# Step 1: Predict for frames at prediction_interval only
|
| 358 |
+
frames_to_predict = []
|
| 359 |
+
for frame_id in range(len(detection_results)):
|
| 360 |
+
if frame_id % prediction_interval == 0:
|
| 361 |
+
frames_to_predict.append(frame_id)
|
| 362 |
+
|
| 363 |
+
print(f"Predicting teams for {len(frames_to_predict)}/{len(detection_results)} frames "
|
| 364 |
+
f"(saving {100 - (len(frames_to_predict) * 100 // len(detection_results))}% compute)")
|
| 365 |
+
|
| 366 |
+
for frame_id in frames_to_predict:
|
| 367 |
+
detection_box = detection_results[frame_id].boxes.data
|
| 368 |
+
frame_image = decoded_images[frame_id]
|
| 369 |
+
|
| 370 |
+
# Collect player crops for this frame
|
| 371 |
+
frame_player_crops = []
|
| 372 |
+
frame_player_indices = []
|
| 373 |
+
frame_player_boxes = []
|
| 374 |
+
|
| 375 |
+
for idx, box in enumerate(detection_box):
|
| 376 |
+
x1, y1, x2, y2, conf, cls_id = box.tolist()
|
| 377 |
+
if cls_id == 2 and conf < 0.6:
|
| 378 |
+
continue
|
| 379 |
+
mapped_cls_id = str(int(cls_id))
|
| 380 |
+
|
| 381 |
+
# Collect player crops for prediction
|
| 382 |
+
if self.team_classifier and self.team_classifier_fitted and mapped_cls_id == '2':
|
| 383 |
+
crop = frame_image[int(y1):int(y2), int(x1):int(x2)]
|
| 384 |
+
if crop.size > 0:
|
| 385 |
+
frame_player_crops.append(crop)
|
| 386 |
+
frame_player_indices.append(idx)
|
| 387 |
+
frame_player_boxes.append((x1, y1, x2, y2))
|
| 388 |
+
|
| 389 |
+
# Predict teams for all players in this frame
|
| 390 |
+
if len(frame_player_crops) > 0:
|
| 391 |
+
team_ids = self.team_classifier.predict(frame_player_crops)
|
| 392 |
+
predicted_frame_data[frame_id] = {}
|
| 393 |
+
for idx, bbox, team_id in zip(frame_player_indices, frame_player_boxes, team_ids):
|
| 394 |
+
# Map team_id (0,1) to cls_id (6,7)
|
| 395 |
+
team_cls_id = str(6 + int(team_id))
|
| 396 |
+
predicted_frame_data[frame_id][idx] = (bbox, team_cls_id)
|
| 397 |
+
|
| 398 |
+
# Step 2: Process all frames (interpolate skipped frames)
|
| 399 |
+
fallback_count = 0
|
| 400 |
+
interpolated_count = 0
|
| 401 |
+
bboxes: dict[int, list[BoundingBox]] = {}
|
| 402 |
+
for frame_id in range(len(detection_results)):
|
| 403 |
+
detection_box = detection_results[frame_id].boxes.data
|
| 404 |
+
frame_image = decoded_images[frame_id]
|
| 405 |
+
boxes = []
|
| 406 |
+
|
| 407 |
+
team_predictions = {}
|
| 408 |
+
|
| 409 |
+
if frame_id % prediction_interval == 0:
|
| 410 |
+
# Predicted frame: use pre-computed predictions
|
| 411 |
+
if frame_id in predicted_frame_data:
|
| 412 |
+
for idx, (bbox, team_cls_id) in predicted_frame_data[frame_id].items():
|
| 413 |
+
team_predictions[idx] = team_cls_id
|
| 414 |
+
else:
|
| 415 |
+
# Skipped frame: interpolate from neighboring predicted frames
|
| 416 |
+
# Find nearest predicted frames
|
| 417 |
+
prev_predicted_frame = (frame_id // prediction_interval) * prediction_interval
|
| 418 |
+
next_predicted_frame = prev_predicted_frame + prediction_interval
|
| 419 |
+
|
| 420 |
+
# Collect current frame player boxes and fallback crops for batch prediction
|
| 421 |
+
fallback_crops = []
|
| 422 |
+
fallback_indices = []
|
| 423 |
+
|
| 424 |
+
for idx, box in enumerate(detection_box):
|
| 425 |
+
x1, y1, x2, y2, conf, cls_id = box.tolist()
|
| 426 |
+
if cls_id == 2 and conf < 0.6:
|
| 427 |
+
continue
|
| 428 |
+
mapped_cls_id = str(int(cls_id))
|
| 429 |
+
|
| 430 |
+
if self.team_classifier and self.team_classifier_fitted and mapped_cls_id == '2':
|
| 431 |
+
target_box = (x1, y1, x2, y2)
|
| 432 |
+
|
| 433 |
+
# Try to match with previous predicted frame
|
| 434 |
+
best_team_id = None
|
| 435 |
+
best_iou = 0.0
|
| 436 |
+
|
| 437 |
+
if prev_predicted_frame in predicted_frame_data:
|
| 438 |
+
team_id, iou = self._find_best_match(
|
| 439 |
+
target_box,
|
| 440 |
+
predicted_frame_data[prev_predicted_frame],
|
| 441 |
+
iou_threshold
|
| 442 |
+
)
|
| 443 |
+
if team_id is not None:
|
| 444 |
+
best_team_id = team_id
|
| 445 |
+
best_iou = iou
|
| 446 |
+
|
| 447 |
+
# Try to match with next predicted frame if available and no good match yet
|
| 448 |
+
if best_team_id is None and next_predicted_frame < len(detection_results):
|
| 449 |
+
if next_predicted_frame in predicted_frame_data:
|
| 450 |
+
team_id, iou = self._find_best_match(
|
| 451 |
+
target_box,
|
| 452 |
+
predicted_frame_data[next_predicted_frame],
|
| 453 |
+
iou_threshold
|
| 454 |
+
)
|
| 455 |
+
if team_id is not None and iou > best_iou:
|
| 456 |
+
best_team_id = team_id
|
| 457 |
+
best_iou = iou
|
| 458 |
+
|
| 459 |
+
# Track interpolation success
|
| 460 |
+
if best_team_id is not None:
|
| 461 |
+
interpolated_count += 1
|
| 462 |
+
team_predictions[idx] = best_team_id
|
| 463 |
+
else:
|
| 464 |
+
# Collect fallback crops for batch prediction
|
| 465 |
+
crop = frame_image[int(y1):int(y2), int(x1):int(x2)]
|
| 466 |
+
if crop.size > 0:
|
| 467 |
+
fallback_crops.append(crop)
|
| 468 |
+
fallback_indices.append(idx)
|
| 469 |
+
|
| 470 |
+
# Batch predict all fallback crops at once (much faster than individual calls)
|
| 471 |
+
if len(fallback_crops) > 0:
|
| 472 |
+
fallback_team_ids = self.team_classifier.predict(fallback_crops)
|
| 473 |
+
for idx, team_id in zip(fallback_indices, fallback_team_ids):
|
| 474 |
+
team_predictions[idx] = str(6 + int(team_id))
|
| 475 |
+
fallback_count += 1
|
| 476 |
+
|
| 477 |
+
# Pre-filter staff boxes once per frame (optimization)
|
| 478 |
+
staff_boxes = []
|
| 479 |
+
for idy, boxy in enumerate(detection_box):
|
| 480 |
+
s_x1, s_y1, s_x2, s_y2, s_conf, s_cls_id = boxy.tolist()
|
| 481 |
+
if s_cls_id == 4:
|
| 482 |
+
staff_boxes.append((s_x1, s_y1, s_x2, s_y2))
|
| 483 |
+
|
| 484 |
+
# Pre-compute player boxes for vectorized staff overlap check (if many players)
|
| 485 |
+
player_boxes_for_staff_check = []
|
| 486 |
+
player_indices_for_staff_check = []
|
| 487 |
+
if len(staff_boxes) > 0:
|
| 488 |
+
for idx, box in enumerate(detection_box):
|
| 489 |
+
x1, y1, x2, y2, conf, cls_id = box.tolist()
|
| 490 |
+
if cls_id == 2 and conf >= 0.6:
|
| 491 |
+
player_boxes_for_staff_check.append((x1, y1, x2, y2))
|
| 492 |
+
player_indices_for_staff_check.append(idx)
|
| 493 |
+
|
| 494 |
+
# Vectorized staff overlap check if we have players and staff
|
| 495 |
+
staff_overlap_mask = set()
|
| 496 |
+
if len(staff_boxes) > 0 and len(player_boxes_for_staff_check) > 0:
|
| 497 |
+
# Use vectorized IoU calculation for all player-staff pairs
|
| 498 |
+
staff_array = np.array(staff_boxes, dtype=np.float32)
|
| 499 |
+
player_array = np.array(player_boxes_for_staff_check, dtype=np.float32)
|
| 500 |
+
|
| 501 |
+
# Broadcast to compute all pairwise IoUs
|
| 502 |
+
for player_idx, player_box in enumerate(player_boxes_for_staff_check):
|
| 503 |
+
p_x1, p_y1, p_x2, p_y2 = player_box
|
| 504 |
+
s_x1 = staff_array[:, 0]
|
| 505 |
+
s_y1 = staff_array[:, 1]
|
| 506 |
+
s_x2 = staff_array[:, 2]
|
| 507 |
+
s_y2 = staff_array[:, 3]
|
| 508 |
+
|
| 509 |
+
# Vectorized IoU calculation
|
| 510 |
+
x_left = np.maximum(p_x1, s_x1)
|
| 511 |
+
y_top = np.maximum(p_y1, s_y1)
|
| 512 |
+
x_right = np.minimum(p_x2, s_x2)
|
| 513 |
+
y_bottom = np.minimum(p_y2, s_y2)
|
| 514 |
+
|
| 515 |
+
intersection = np.maximum(0, x_right - x_left) * np.maximum(0, y_bottom - y_top)
|
| 516 |
+
player_area = (p_x2 - p_x1) * (p_y2 - p_y1)
|
| 517 |
+
staff_areas = (s_x2 - s_x1) * (s_y2 - s_y1)
|
| 518 |
+
union = player_area + staff_areas - intersection
|
| 519 |
+
|
| 520 |
+
ious = np.where(union > 0, intersection / union, 0.0)
|
| 521 |
+
if np.any(ious >= 0.8):
|
| 522 |
+
staff_overlap_mask.add(player_indices_for_staff_check[player_idx])
|
| 523 |
+
|
| 524 |
+
# Parse boxes with team classification
|
| 525 |
+
for idx, box in enumerate(detection_box):
|
| 526 |
+
x1, y1, x2, y2, conf, cls_id = box.tolist()
|
| 527 |
+
if cls_id == 2 and conf < 0.6:
|
| 528 |
+
continue
|
| 529 |
+
|
| 530 |
+
# Check overlap with staff box (using pre-computed mask)
|
| 531 |
+
if idx in staff_overlap_mask:
|
| 532 |
+
continue
|
| 533 |
+
|
| 534 |
+
mapped_cls_id = str(int(cls_id))
|
| 535 |
+
|
| 536 |
+
# Override cls_id for players with team prediction
|
| 537 |
+
if idx in team_predictions:
|
| 538 |
+
mapped_cls_id = team_predictions[idx]
|
| 539 |
+
if mapped_cls_id != '4':
|
| 540 |
+
if int(mapped_cls_id) == 3 and conf < 0.5:
|
| 541 |
+
continue
|
| 542 |
+
boxes.append(
|
| 543 |
+
BoundingBox(
|
| 544 |
+
x1=int(x1),
|
| 545 |
+
y1=int(y1),
|
| 546 |
+
x2=int(x2),
|
| 547 |
+
y2=int(y2),
|
| 548 |
+
cls_id=int(mapped_cls_id),
|
| 549 |
+
conf=float(conf),
|
| 550 |
+
)
|
| 551 |
+
)
|
| 552 |
+
# Handle footballs - keep only the best one
|
| 553 |
+
footballs = [bb for bb in boxes if int(bb.cls_id) == 0]
|
| 554 |
+
if len(footballs) > 1:
|
| 555 |
+
best_ball = max(footballs, key=lambda b: b.conf)
|
| 556 |
+
boxes = [bb for bb in boxes if int(bb.cls_id) != 0]
|
| 557 |
+
boxes.append(best_ball)
|
| 558 |
+
|
| 559 |
+
bboxes[offset + frame_id] = boxes
|
| 560 |
+
return bboxes
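# Class-id convention used by this method (inferred from the code above):
# detector class 2 ("player") is re-labelled to 6 or 7 once the team
# classifier assigns team 0 or 1, detections overlapping a class-4 (staff)
# box with IoU >= 0.8 are dropped, and only the highest-confidence class-0
# (football) box is kept per frame.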
|
| 561 |
+
|
| 562 |
+
|
| 563 |
+
def predict_batch(self, batch_images: List[ndarray], offset: int, n_keypoints: int) -> List[TVFrameResult]:
|
| 564 |
+
start = time.time()
|
| 565 |
+
detection_results = self._detect_objects_batch(batch_images)
|
| 566 |
+
end = time.time()
|
| 567 |
+
print(f"Detection time: {end - start}")
|
| 568 |
+
|
| 569 |
+
# Use hybrid team classification
|
| 570 |
+
start = time.time()
|
| 571 |
+
bboxes = self._team_classify(detection_results, batch_images, offset)
|
| 572 |
+
end = time.time()
|
| 573 |
+
print(f"Team classify time: {end - start}")
|
| 574 |
+
|
| 575 |
+
# Phase 3: Keypoint Detection
|
| 576 |
+
start = time.time()
|
| 577 |
+
|
| 578 |
+
|
| 579 |
+
pitch_batch_size = min(self.pitch_batch_size, len(batch_images))
|
| 580 |
+
keypoints: Dict[int, List[Tuple[int, int]]] = {}
|
| 581 |
+
|
| 582 |
+
start = time.time()
|
| 583 |
+
|
| 584 |
+
while True:
|
| 585 |
+
gc.collect()
|
| 586 |
+
if torch.cuda.is_available():
|
| 587 |
+
torch.cuda.empty_cache()
|
| 588 |
+
torch.cuda.synchronize()
|
| 589 |
+
device_str = "cuda"
|
| 590 |
+
keypoints_result = process_batch_input(
|
| 591 |
+
batch_images,
|
| 592 |
+
self.keypoints_model,
|
| 593 |
+
self.kp_threshold,
|
| 594 |
+
device_str,
|
| 595 |
+
batch_size=pitch_batch_size,
|
| 596 |
+
)
|
| 597 |
+
if keypoints_result is not None and len(keypoints_result) > 0:
|
| 598 |
+
for frame_number_in_batch, kp_dict in enumerate(keypoints_result):
|
| 599 |
+
if frame_number_in_batch >= len(batch_images):
|
| 600 |
+
break
|
| 601 |
+
frame_keypoints: List[Tuple[int, int]] = []
|
| 602 |
+
try:
|
| 603 |
+
height, width = batch_images[frame_number_in_batch].shape[:2]
|
| 604 |
+
if kp_dict is not None and isinstance(kp_dict, dict):
|
| 605 |
+
for idx in range(32):
|
| 606 |
+
x, y = 0, 0
|
| 607 |
+
kp_idx = idx + 1
|
| 608 |
+
if kp_idx in kp_dict:
|
| 609 |
+
try:
|
| 610 |
+
kp_data = kp_dict[kp_idx]
|
| 611 |
+
if isinstance(kp_data, dict) and "x" in kp_data and "y" in kp_data:
|
| 612 |
+
x = int(kp_data["x"] * width)
|
| 613 |
+
y = int(kp_data["y"] * height)
|
| 614 |
+
except (KeyError, TypeError, ValueError):
|
| 615 |
+
pass
|
| 616 |
+
frame_keypoints.append((x, y))
|
| 617 |
+
except (IndexError, ValueError, AttributeError):
|
| 618 |
+
frame_keypoints = [(0, 0)] * 32
|
| 619 |
+
if len(frame_keypoints) < n_keypoints:
|
| 620 |
+
frame_keypoints.extend([(0, 0)] * (n_keypoints - len(frame_keypoints)))
|
| 621 |
+
else:
|
| 622 |
+
frame_keypoints = frame_keypoints[:n_keypoints]
|
| 623 |
+
|
| 624 |
+
keypoints[offset + frame_number_in_batch] = frame_keypoints
|
| 625 |
+
break
|
| 626 |
+
end = time.time()
|
| 627 |
+
print(f"Keypoint time: {end - start}")
|
| 628 |
+
|
| 629 |
+
results: List[TVFrameResult] = []
|
| 630 |
+
for frame_number in range(offset, offset + len(batch_images)):
|
| 631 |
+
frame_boxes = bboxes.get(frame_number, [])
|
| 632 |
+
result = TVFrameResult(
|
| 633 |
+
frame_id=frame_number,
|
| 634 |
+
boxes=frame_boxes,
|
| 635 |
+
keypoints=keypoints.get(
|
| 636 |
+
frame_number,
|
| 637 |
+
[(0, 0) for _ in range(n_keypoints)],
|
| 638 |
+
),
|
| 639 |
+
)
|
| 640 |
+
results.append(result)
|
| 641 |
+
|
| 642 |
+
start = time.time()
|
| 643 |
+
if len(batch_images) > 0:
|
| 644 |
+
h, w = batch_images[0].shape[:2]
|
| 645 |
+
results = run_keypoints_post_processing(
|
| 646 |
+
results, w, h,
|
| 647 |
+
frames=batch_images,
|
| 648 |
+
offset=offset,
|
| 649 |
+
template_keypoints=self.template_keypoints,
|
| 650 |
+
template_image=self.template_image,
|
| 651 |
+
)
|
| 652 |
+
end = time.time()
|
| 653 |
+
print(f"Keypoint post processing time: {end - start}")
|
| 654 |
+
|
| 655 |
+
final_keypoints: Dict[int, List[Tuple[int, int]]] = {}
|
| 656 |
+
|
| 657 |
+
for frame_number_in_batch, result in enumerate(results):
|
| 658 |
+
frame_keypoints = result.keypoints
|
| 659 |
+
try:
|
| 660 |
+
if self.last_valid_keypoints is None:
|
| 661 |
+
self.last_valid_keypoints = final_keypoints.get(offset + frame_number_in_batch - 1, self.last_valid_keypoints)
|
| 662 |
+
# Evaluate both keypoint sets in batch (much faster!)
|
| 663 |
+
scores = evaluate_keypoints_batch_for_frame(
|
| 664 |
+
template_keypoints=self.template_keypoints,
|
| 665 |
+
frame_keypoints_list=[result.keypoints, self.last_valid_keypoints],
|
| 666 |
+
frame=batch_images[frame_number_in_batch],
|
| 667 |
+
floor_markings_template=self.template_image,
|
| 668 |
+
device="cuda"
|
| 669 |
+
)
|
| 670 |
+
score = scores[0]
|
| 671 |
+
self.last_score = scores[1]
|
| 672 |
+
|
| 673 |
+
if self.last_score > score:
|
| 674 |
+
frame_keypoints = self.last_valid_keypoints
|
| 675 |
+
else:
|
| 676 |
+
self.last_score = score
|
| 677 |
+
|
| 678 |
+
|
| 679 |
+
except Exception as e:
|
| 680 |
+
# Keypoint scoring failed for this frame; log the error and keep the keypoints already computed above
|
| 681 |
+
print('Error: ', e)
|
| 682 |
+
|
| 683 |
+
self.last_valid_keypoints = frame_keypoints
|
| 684 |
+
|
| 685 |
+
final_keypoints[offset + frame_number_in_batch] = frame_keypoints
|
| 686 |
+
|
| 687 |
+
|
| 688 |
+
final_results: List[TVFrameResult] = []
|
| 689 |
+
for frame_number in range(offset, offset + len(batch_images)):
|
| 690 |
+
frame_boxes = bboxes.get(frame_number, [])
|
| 691 |
+
result = TVFrameResult(
|
| 692 |
+
frame_id=frame_number,
|
| 693 |
+
boxes=frame_boxes,
|
| 694 |
+
keypoints=final_keypoints.get(
|
| 695 |
+
frame_number,
|
| 696 |
+
[(0, 0) for _ in range(n_keypoints)],
|
| 697 |
+
),
|
| 698 |
+
)
|
| 699 |
+
final_results.append(result)
|
| 700 |
+
|
| 701 |
+
|
| 702 |
+
gc.collect()
|
| 703 |
+
if torch.cuda.is_available():
|
| 704 |
+
torch.cuda.empty_cache()
|
| 705 |
+
torch.cuda.synchronize()
|
| 706 |
+
|
| 707 |
+
return final_results
|
| 708 |
+
# return results
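# Minimal usage sketch (caller-side names such as `miner` and `frame_paths`
# are hypothetical, not defined in this snippet):
#   frames = [cv2.imread(p) for p in frame_paths]            # BGR frames
#   results = miner.predict_batch(frames, offset=0, n_keypoints=32)
#   results[0].boxes      -> List[BoundingBox]
#   results[0].keypoints  -> 32 (x, y) tuples, (0, 0) where no point was found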
|
| 709 |
+
|
| 710 |
+
def _detect_keypoints_batch(self, batch_images: List[ndarray],
|
| 711 |
+
offset: int, n_keypoints: int) -> Dict[int, List[Tuple[int, int]]]:
|
| 712 |
+
"""
|
| 713 |
+
Phase 3: Keypoint detection for all frames in batch.
|
| 714 |
+
|
| 715 |
+
Args:
|
| 716 |
+
batch_images: List of images to process
|
| 717 |
+
offset: Frame offset for numbering
|
| 718 |
+
n_keypoints: Number of keypoints expected
|
| 719 |
+
|
| 720 |
+
Returns:
|
| 721 |
+
Dictionary mapping frame_id to list of keypoint coordinates
|
| 722 |
+
"""
|
| 723 |
+
keypoints: Dict[int, List[Tuple[int, int]]] = {}
|
| 724 |
+
keypoints_model_results = self.keypoints_model_yolo.predict(batch_images)
|
| 725 |
+
|
| 726 |
+
if keypoints_model_results is None:
|
| 727 |
+
return keypoints
|
| 728 |
+
|
| 729 |
+
for frame_idx_in_batch, detection in enumerate(keypoints_model_results):
|
| 730 |
+
if not hasattr(detection, "keypoints") or detection.keypoints is None:
|
| 731 |
+
continue
|
| 732 |
+
|
| 733 |
+
# Extract keypoints with confidence
|
| 734 |
+
frame_keypoints_with_conf: List[Tuple[int, int, float]] = []
|
| 735 |
+
for i, part_points in enumerate(detection.keypoints.data):
|
| 736 |
+
for k_id, (x, y, _) in enumerate(part_points):
|
| 737 |
+
confidence = float(detection.keypoints.conf[i][k_id])
|
| 738 |
+
frame_keypoints_with_conf.append((int(x), int(y), confidence))
|
| 739 |
+
|
| 740 |
+
# Pad or truncate to expected number of keypoints
|
| 741 |
+
if len(frame_keypoints_with_conf) < n_keypoints:
|
| 742 |
+
frame_keypoints_with_conf.extend(
|
| 743 |
+
[(0, 0, 0.0)] * (n_keypoints - len(frame_keypoints_with_conf))
|
| 744 |
+
)
|
| 745 |
+
else:
|
| 746 |
+
frame_keypoints_with_conf = frame_keypoints_with_conf[:n_keypoints]
|
| 747 |
+
|
| 748 |
+
# Filter keypoints based on confidence thresholds
|
| 749 |
+
filtered_keypoints: List[Tuple[int, int]] = []
|
| 750 |
+
for idx, (x, y, confidence) in enumerate(frame_keypoints_with_conf):
|
| 751 |
+
if idx in self.CORNER_INDICES:
|
| 752 |
+
# Corner keypoints have lower confidence threshold
|
| 753 |
+
if confidence < 0.3:
|
| 754 |
+
filtered_keypoints.append((0, 0))
|
| 755 |
+
else:
|
| 756 |
+
filtered_keypoints.append((int(x), int(y)))
|
| 757 |
+
else:
|
| 758 |
+
# Regular keypoints
|
| 759 |
+
if confidence < 0.5:
|
| 760 |
+
filtered_keypoints.append((0, 0))
|
| 761 |
+
else:
|
| 762 |
+
filtered_keypoints.append((int(x), int(y)))
|
| 763 |
+
|
| 764 |
+
frame_id = offset + frame_idx_in_batch
|
| 765 |
+
keypoints[frame_id] = filtered_keypoints
|
| 766 |
+
|
| 767 |
+
return keypoints
|
| 768 |
+
|
| 769 |
+
def predict_keypoints(
|
| 770 |
+
self,
|
| 771 |
+
images: List[ndarray],
|
| 772 |
+
n_keypoints: int = 32,
|
| 773 |
+
batch_size: Optional[int] = None,
|
| 774 |
+
conf_threshold: float = 0.5,
|
| 775 |
+
corner_conf_threshold: float = 0.3,
|
| 776 |
+
verbose: bool = False
|
| 777 |
+
) -> Dict[int, List[Tuple[int, int]]]:
|
| 778 |
+
"""
|
| 779 |
+
Standalone function for keypoint detection on a list of images.
|
| 780 |
+
Optimized for maximum prediction speed.
|
| 781 |
+
|
| 782 |
+
Args:
|
| 783 |
+
images: List of images (numpy arrays) to process
|
| 784 |
+
n_keypoints: Number of keypoints expected per frame (default: 32)
|
| 785 |
+
batch_size: Batch size for YOLO prediction (None = auto, uses all images)
|
| 786 |
+
conf_threshold: Confidence threshold for regular keypoints (default: 0.5)
|
| 787 |
+
corner_conf_threshold: Confidence threshold for corner keypoints (default: 0.3)
|
| 788 |
+
verbose: Whether to print progress information
|
| 789 |
+
|
| 790 |
+
Returns:
|
| 791 |
+
Dictionary mapping frame index to list of keypoint coordinates (x, y)
|
| 792 |
+
Frame indices start from 0
|
| 793 |
+
"""
|
| 794 |
+
if not images:
|
| 795 |
+
return {}
|
| 796 |
+
|
| 797 |
+
keypoints: Dict[int, List[Tuple[int, int]]] = {}
|
| 798 |
+
|
| 799 |
+
# Use provided batch_size or process all at once for maximum speed
|
| 800 |
+
if batch_size is None:
|
| 801 |
+
batch_size = len(images)
|
| 802 |
+
|
| 803 |
+
# Process in batches for optimal GPU utilization
|
| 804 |
+
for batch_start in range(0, len(images), batch_size):
|
| 805 |
+
batch_end = min(batch_start + batch_size, len(images))
|
| 806 |
+
batch_images = images[batch_start:batch_end]
|
| 807 |
+
|
| 808 |
+
if verbose:
|
| 809 |
+
print(f"Processing keypoints batch {batch_start}-{batch_end-1} ({len(batch_images)} images)")
|
| 810 |
+
|
| 811 |
+
# YOLO keypoint prediction (optimized batch processing)
|
| 812 |
+
keypoints_model_results = self.keypoints_model_yolo.predict(
|
| 813 |
+
batch_images,
|
| 814 |
+
verbose=False,
|
| 815 |
+
save=False,
|
| 816 |
+
conf=0.1, # Lower conf for detection, we filter later
|
| 817 |
+
)
|
| 818 |
+
|
| 819 |
+
if keypoints_model_results is None:
|
| 820 |
+
# Fill with empty keypoints for this batch
|
| 821 |
+
for frame_idx in range(batch_start, batch_end):
|
| 822 |
+
keypoints[frame_idx] = [(0, 0)] * n_keypoints
|
| 823 |
+
continue
|
| 824 |
+
|
| 825 |
+
# Process each frame in the batch
|
| 826 |
+
for batch_idx, detection in enumerate(keypoints_model_results):
|
| 827 |
+
frame_idx = batch_start + batch_idx
|
| 828 |
+
|
| 829 |
+
if not hasattr(detection, "keypoints") or detection.keypoints is None:
|
| 830 |
+
keypoints[frame_idx] = [(0, 0)] * n_keypoints
|
| 831 |
+
continue
|
| 832 |
+
|
| 833 |
+
# Extract keypoints with confidence
|
| 834 |
+
frame_keypoints_with_conf: List[Tuple[int, int, float]] = []
|
| 835 |
+
try:
|
| 836 |
+
for i, part_points in enumerate(detection.keypoints.data):
|
| 837 |
+
for k_id, (x, y, _) in enumerate(part_points):
|
| 838 |
+
confidence = float(detection.keypoints.conf[i][k_id])
|
| 839 |
+
frame_keypoints_with_conf.append((int(x), int(y), confidence))
|
| 840 |
+
except (AttributeError, IndexError, TypeError):
|
| 841 |
+
keypoints[frame_idx] = [(0, 0)] * n_keypoints
|
| 842 |
+
continue
|
| 843 |
+
|
| 844 |
+
# Pad or truncate to expected number of keypoints
|
| 845 |
+
if len(frame_keypoints_with_conf) < n_keypoints:
|
| 846 |
+
frame_keypoints_with_conf.extend(
|
| 847 |
+
[(0, 0, 0.0)] * (n_keypoints - len(frame_keypoints_with_conf))
|
| 848 |
+
)
|
| 849 |
+
else:
|
| 850 |
+
frame_keypoints_with_conf = frame_keypoints_with_conf[:n_keypoints]
|
| 851 |
+
|
| 852 |
+
# Filter keypoints based on confidence thresholds
|
| 853 |
+
filtered_keypoints: List[Tuple[int, int]] = []
|
| 854 |
+
for idx, (x, y, confidence) in enumerate(frame_keypoints_with_conf):
|
| 855 |
+
if idx in self.CORNER_INDICES:
|
| 856 |
+
# Corner keypoints have lower confidence threshold
|
| 857 |
+
if confidence < corner_conf_threshold:
|
| 858 |
+
filtered_keypoints.append((0, 0))
|
| 859 |
+
else:
|
| 860 |
+
filtered_keypoints.append((int(x), int(y)))
|
| 861 |
+
else:
|
| 862 |
+
# Regular keypoints
|
| 863 |
+
if confidence < conf_threshold:
|
| 864 |
+
filtered_keypoints.append((0, 0))
|
| 865 |
+
else:
|
| 866 |
+
filtered_keypoints.append((int(x), int(y)))
|
| 867 |
+
|
| 868 |
+
keypoints[frame_idx] = filtered_keypoints
|
| 869 |
+
|
| 870 |
+
return keypoints
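# Example call (the `images` list is hypothetical):
#   kps = self.predict_keypoints(images, n_keypoints=32, batch_size=8,
#                                conf_threshold=0.5, corner_conf_threshold=0.3)
#   kps[0] -> [(x0, y0), ..., (x31, y31)] with (0, 0) for low-confidence points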
|
| 871 |
+
|
| 872 |
+
def predict_objects(
|
| 873 |
+
self,
|
| 874 |
+
images: List[ndarray],
|
| 875 |
+
batch_size: Optional[int] = 16,
|
| 876 |
+
conf_threshold: float = 0.5,
|
| 877 |
+
iou_threshold: float = 0.45,
|
| 878 |
+
classes: Optional[List[int]] = None,
|
| 879 |
+
verbose: bool = False,
|
| 880 |
+
) -> Dict[int, List[BoundingBox]]:
|
| 881 |
+
"""
|
| 882 |
+
Standalone high-throughput object detection function.
|
| 883 |
+
Runs the YOLO detector directly on raw images while skipping
|
| 884 |
+
any team-classification or keypoint stages for maximum FPS.
|
| 885 |
+
|
| 886 |
+
Args:
|
| 887 |
+
images: List of frames (BGR numpy arrays).
|
| 888 |
+
batch_size: Number of frames per inference pass. Use None to process
|
| 889 |
+
all frames at once (fastest but highest memory usage).
|
| 890 |
+
conf_threshold: Detection confidence threshold.
|
| 891 |
+
iou_threshold: IoU threshold for NMS within YOLO.
|
| 892 |
+
classes: Optional list of class IDs to keep (None = all classes).
|
| 893 |
+
verbose: Whether to print per-batch progress from YOLO.
|
| 894 |
+
|
| 895 |
+
Returns:
|
| 896 |
+
Dict mapping frame index -> list of BoundingBox predictions.
|
| 897 |
+
"""
|
| 898 |
+
if not images:
|
| 899 |
+
return {}
|
| 900 |
+
|
| 901 |
+
detections: Dict[int, List[BoundingBox]] = {}
|
| 902 |
+
effective_batch = len(images) if batch_size is None else max(1, batch_size)
|
| 903 |
+
|
| 904 |
+
for batch_start in range(0, len(images), effective_batch):
|
| 905 |
+
batch_end = min(batch_start + effective_batch, len(images))
|
| 906 |
+
batch_images = images[batch_start:batch_end]
|
| 907 |
+
|
| 908 |
+
start = time.time()
|
| 909 |
+
yolo_results = self.bbox_model(
|
| 910 |
+
batch_images,
|
| 911 |
+
conf=conf_threshold,
|
| 912 |
+
iou=iou_threshold,
|
| 913 |
+
classes=classes,
|
| 914 |
+
verbose=verbose,
|
| 915 |
+
save=False,
|
| 916 |
+
)
|
| 917 |
+
end = time.time()
|
| 918 |
+
print(f"YOLO time: {end - start}")
|
| 919 |
+
|
| 920 |
+
for local_idx, result in enumerate(yolo_results):
|
| 921 |
+
frame_idx = batch_start + local_idx
|
| 922 |
+
frame_boxes: List[BoundingBox] = []
|
| 923 |
+
|
| 924 |
+
if not hasattr(result, "boxes") or result.boxes is None:
|
| 925 |
+
detections[frame_idx] = frame_boxes
|
| 926 |
+
continue
|
| 927 |
+
|
| 928 |
+
boxes_tensor = result.boxes.data
|
| 929 |
+
if boxes_tensor is None:
|
| 930 |
+
detections[frame_idx] = frame_boxes
|
| 931 |
+
continue
|
| 932 |
+
|
| 933 |
+
for box in boxes_tensor:
|
| 934 |
+
try:
|
| 935 |
+
x1, y1, x2, y2, conf, cls_id = box.tolist()
|
| 936 |
+
frame_boxes.append(
|
| 937 |
+
BoundingBox(
|
| 938 |
+
x1=int(x1),
|
| 939 |
+
y1=int(y1),
|
| 940 |
+
x2=int(x2),
|
| 941 |
+
y2=int(y2),
|
| 942 |
+
cls_id=int(cls_id),
|
| 943 |
+
conf=float(conf),
|
| 944 |
+
)
|
| 945 |
+
)
|
| 946 |
+
except (ValueError, TypeError):
|
| 947 |
+
continue
|
| 948 |
+
|
| 949 |
+
detections[frame_idx] = frame_boxes
|
| 950 |
+
|
| 951 |
+
return detections
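# Example call (hypothetical): keep only the ball (class 0) and players (class 2):
#   dets = self.predict_objects(images, batch_size=16, conf_threshold=0.5,
#                               classes=[0, 2])
#   dets[0] -> List[BoundingBox] for the first frame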
|
| 952 |
+
|
object-detection.onnx
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:05112479be8cb59494e9ae23a57af43becd5aa1f448b0e5ed33fcb6b4c2bbbc3
+size 273322667
osnet_ain.pyc
ADDED
Binary file (24.2 kB).
osnet_model.pth.tar-100
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:64873ef0e8abf28df31facd113f27634e2d085a2dcf8d19123409b1d0e2566c8
+size 36189526
pitch.py
ADDED
@@ -0,0 +1,687 @@
| 1 |
+
from __future__ import absolute_import
|
| 2 |
+
from __future__ import division
|
| 3 |
+
from __future__ import print_function
|
| 4 |
+
|
| 5 |
+
import os
|
| 6 |
+
import sys
|
| 7 |
+
import time
|
| 8 |
+
from typing import List, Optional, Tuple
|
| 9 |
+
|
| 10 |
+
import cv2
|
| 11 |
+
import numpy as np
|
| 12 |
+
import torch
|
| 13 |
+
import torch.nn as nn
|
| 14 |
+
import torch.nn.functional as F
|
| 15 |
+
import torchvision.transforms as T
|
| 16 |
+
import torchvision.transforms.functional as f
|
| 17 |
+
from pydantic import BaseModel
|
| 18 |
+
|
| 19 |
+
import logging
|
| 20 |
+
logger = logging.getLogger(__name__)
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
class BoundingBox(BaseModel):
|
| 24 |
+
x1: int
|
| 25 |
+
y1: int
|
| 26 |
+
x2: int
|
| 27 |
+
y2: int
|
| 28 |
+
cls_id: int
|
| 29 |
+
conf: float
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
class TVFrameResult(BaseModel):
|
| 33 |
+
frame_id: int
|
| 34 |
+
boxes: list[BoundingBox]
|
| 35 |
+
keypoints: list[tuple[int, int]]
|
| 36 |
+
|
| 37 |
+
BatchNorm2d = nn.BatchNorm2d
|
| 38 |
+
BN_MOMENTUM = 0.1
|
| 39 |
+
|
| 40 |
+
def conv3x3(in_planes, out_planes, stride=1):
|
| 41 |
+
"""3x3 convolution with padding"""
|
| 42 |
+
return nn.Conv2d(in_planes, out_planes, kernel_size=3,
|
| 43 |
+
stride=stride, padding=1, bias=False)
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
class BasicBlock(nn.Module):
|
| 47 |
+
expansion = 1
|
| 48 |
+
|
| 49 |
+
def __init__(self, inplanes, planes, stride=1, downsample=None):
|
| 50 |
+
super(BasicBlock, self).__init__()
|
| 51 |
+
self.conv1 = conv3x3(inplanes, planes, stride)
|
| 52 |
+
self.bn1 = BatchNorm2d(planes, momentum=BN_MOMENTUM)
|
| 53 |
+
self.relu = nn.ReLU(inplace=True)
|
| 54 |
+
self.conv2 = conv3x3(planes, planes)
|
| 55 |
+
self.bn2 = BatchNorm2d(planes, momentum=BN_MOMENTUM)
|
| 56 |
+
self.downsample = downsample
|
| 57 |
+
self.stride = stride
|
| 58 |
+
|
| 59 |
+
def forward(self, x):
|
| 60 |
+
residual = x
|
| 61 |
+
|
| 62 |
+
out = self.conv1(x)
|
| 63 |
+
out = self.bn1(out)
|
| 64 |
+
out = self.relu(out)
|
| 65 |
+
|
| 66 |
+
out = self.conv2(out)
|
| 67 |
+
out = self.bn2(out)
|
| 68 |
+
|
| 69 |
+
if self.downsample is not None:
|
| 70 |
+
residual = self.downsample(x)
|
| 71 |
+
|
| 72 |
+
out += residual
|
| 73 |
+
out = self.relu(out)
|
| 74 |
+
|
| 75 |
+
return out
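# Residual pattern used by BasicBlock (and, with three convs, Bottleneck):
# out = relu(bn(conv(x))) stages followed by out = relu(out + residual),
# where `residual` is the 1x1-projected input whenever stride or channel
# count changes (the `downsample` branch) and the raw input otherwise.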
|
| 76 |
+
|
| 77 |
+
|
| 78 |
+
class Bottleneck(nn.Module):
|
| 79 |
+
expansion = 4
|
| 80 |
+
|
| 81 |
+
def __init__(self, inplanes, planes, stride=1, downsample=None):
|
| 82 |
+
super(Bottleneck, self).__init__()
|
| 83 |
+
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
|
| 84 |
+
self.bn1 = BatchNorm2d(planes, momentum=BN_MOMENTUM)
|
| 85 |
+
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
|
| 86 |
+
padding=1, bias=False)
|
| 87 |
+
self.bn2 = BatchNorm2d(planes, momentum=BN_MOMENTUM)
|
| 88 |
+
self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1,
|
| 89 |
+
bias=False)
|
| 90 |
+
self.bn3 = BatchNorm2d(planes * self.expansion,
|
| 91 |
+
momentum=BN_MOMENTUM)
|
| 92 |
+
self.relu = nn.ReLU(inplace=True)
|
| 93 |
+
self.downsample = downsample
|
| 94 |
+
self.stride = stride
|
| 95 |
+
|
| 96 |
+
def forward(self, x):
|
| 97 |
+
residual = x
|
| 98 |
+
|
| 99 |
+
out = self.conv1(x)
|
| 100 |
+
out = self.bn1(out)
|
| 101 |
+
out = self.relu(out)
|
| 102 |
+
|
| 103 |
+
out = self.conv2(out)
|
| 104 |
+
out = self.bn2(out)
|
| 105 |
+
out = self.relu(out)
|
| 106 |
+
|
| 107 |
+
out = self.conv3(out)
|
| 108 |
+
out = self.bn3(out)
|
| 109 |
+
|
| 110 |
+
if self.downsample is not None:
|
| 111 |
+
residual = self.downsample(x)
|
| 112 |
+
|
| 113 |
+
out += residual
|
| 114 |
+
out = self.relu(out)
|
| 115 |
+
|
| 116 |
+
return out
|
| 117 |
+
|
| 118 |
+
|
| 119 |
+
class HighResolutionModule(nn.Module):
|
| 120 |
+
def __init__(self, num_branches, blocks, num_blocks, num_inchannels,
|
| 121 |
+
num_channels, fuse_method, multi_scale_output=True):
|
| 122 |
+
super(HighResolutionModule, self).__init__()
|
| 123 |
+
self._check_branches(
|
| 124 |
+
num_branches, blocks, num_blocks, num_inchannels, num_channels)
|
| 125 |
+
|
| 126 |
+
self.num_inchannels = num_inchannels
|
| 127 |
+
self.fuse_method = fuse_method
|
| 128 |
+
self.num_branches = num_branches
|
| 129 |
+
|
| 130 |
+
self.multi_scale_output = multi_scale_output
|
| 131 |
+
|
| 132 |
+
self.branches = self._make_branches(
|
| 133 |
+
num_branches, blocks, num_blocks, num_channels)
|
| 134 |
+
self.fuse_layers = self._make_fuse_layers()
|
| 135 |
+
self.relu = nn.ReLU(inplace=True)
|
| 136 |
+
|
| 137 |
+
def _check_branches(self, num_branches, blocks, num_blocks,
|
| 138 |
+
num_inchannels, num_channels):
|
| 139 |
+
if num_branches != len(num_blocks):
|
| 140 |
+
error_msg = 'NUM_BRANCHES({}) <> NUM_BLOCKS({})'.format(
|
| 141 |
+
num_branches, len(num_blocks))
|
| 142 |
+
logger.error(error_msg)
|
| 143 |
+
raise ValueError(error_msg)
|
| 144 |
+
|
| 145 |
+
if num_branches != len(num_channels):
|
| 146 |
+
error_msg = 'NUM_BRANCHES({}) <> NUM_CHANNELS({})'.format(
|
| 147 |
+
num_branches, len(num_channels))
|
| 148 |
+
logger.error(error_msg)
|
| 149 |
+
raise ValueError(error_msg)
|
| 150 |
+
|
| 151 |
+
if num_branches != len(num_inchannels):
|
| 152 |
+
error_msg = 'NUM_BRANCHES({}) <> NUM_INCHANNELS({})'.format(
|
| 153 |
+
num_branches, len(num_inchannels))
|
| 154 |
+
logger.error(error_msg)
|
| 155 |
+
raise ValueError(error_msg)
|
| 156 |
+
|
| 157 |
+
def _make_one_branch(self, branch_index, block, num_blocks, num_channels,
|
| 158 |
+
stride=1):
|
| 159 |
+
downsample = None
|
| 160 |
+
if stride != 1 or \
|
| 161 |
+
self.num_inchannels[branch_index] != num_channels[branch_index] * block.expansion:
|
| 162 |
+
downsample = nn.Sequential(
|
| 163 |
+
nn.Conv2d(self.num_inchannels[branch_index],
|
| 164 |
+
num_channels[branch_index] * block.expansion,
|
| 165 |
+
kernel_size=1, stride=stride, bias=False),
|
| 166 |
+
BatchNorm2d(num_channels[branch_index] * block.expansion,
|
| 167 |
+
momentum=BN_MOMENTUM),
|
| 168 |
+
)
|
| 169 |
+
|
| 170 |
+
layers = []
|
| 171 |
+
layers.append(block(self.num_inchannels[branch_index],
|
| 172 |
+
num_channels[branch_index], stride, downsample))
|
| 173 |
+
self.num_inchannels[branch_index] = \
|
| 174 |
+
num_channels[branch_index] * block.expansion
|
| 175 |
+
for i in range(1, num_blocks[branch_index]):
|
| 176 |
+
layers.append(block(self.num_inchannels[branch_index],
|
| 177 |
+
num_channels[branch_index]))
|
| 178 |
+
|
| 179 |
+
return nn.Sequential(*layers)
|
| 180 |
+
|
| 181 |
+
def _make_branches(self, num_branches, block, num_blocks, num_channels):
|
| 182 |
+
branches = []
|
| 183 |
+
|
| 184 |
+
for i in range(num_branches):
|
| 185 |
+
branches.append(
|
| 186 |
+
self._make_one_branch(i, block, num_blocks, num_channels))
|
| 187 |
+
|
| 188 |
+
return nn.ModuleList(branches)
|
| 189 |
+
|
| 190 |
+
def _make_fuse_layers(self):
|
| 191 |
+
if self.num_branches == 1:
|
| 192 |
+
return None
|
| 193 |
+
|
| 194 |
+
num_branches = self.num_branches
|
| 195 |
+
num_inchannels = self.num_inchannels
|
| 196 |
+
fuse_layers = []
|
| 197 |
+
for i in range(num_branches if self.multi_scale_output else 1):
|
| 198 |
+
fuse_layer = []
|
| 199 |
+
for j in range(num_branches):
|
| 200 |
+
if j > i:
|
| 201 |
+
fuse_layer.append(nn.Sequential(
|
| 202 |
+
nn.Conv2d(num_inchannels[j],
|
| 203 |
+
num_inchannels[i],
|
| 204 |
+
1,
|
| 205 |
+
1,
|
| 206 |
+
0,
|
| 207 |
+
bias=False),
|
| 208 |
+
BatchNorm2d(num_inchannels[i], momentum=BN_MOMENTUM)))
|
| 209 |
+
# nn.Upsample(scale_factor=2**(j-i), mode='nearest')))
|
| 210 |
+
elif j == i:
|
| 211 |
+
fuse_layer.append(None)
|
| 212 |
+
else:
|
| 213 |
+
conv3x3s = []
|
| 214 |
+
for k in range(i - j):
|
| 215 |
+
if k == i - j - 1:
|
| 216 |
+
num_outchannels_conv3x3 = num_inchannels[i]
|
| 217 |
+
conv3x3s.append(nn.Sequential(
|
| 218 |
+
nn.Conv2d(num_inchannels[j],
|
| 219 |
+
num_outchannels_conv3x3,
|
| 220 |
+
3, 2, 1, bias=False),
|
| 221 |
+
BatchNorm2d(num_outchannels_conv3x3, momentum=BN_MOMENTUM)))
|
| 222 |
+
else:
|
| 223 |
+
num_outchannels_conv3x3 = num_inchannels[j]
|
| 224 |
+
conv3x3s.append(nn.Sequential(
|
| 225 |
+
nn.Conv2d(num_inchannels[j],
|
| 226 |
+
num_outchannels_conv3x3,
|
| 227 |
+
3, 2, 1, bias=False),
|
| 228 |
+
BatchNorm2d(num_outchannels_conv3x3,
|
| 229 |
+
momentum=BN_MOMENTUM),
|
| 230 |
+
nn.ReLU(inplace=True)))
|
| 231 |
+
fuse_layer.append(nn.Sequential(*conv3x3s))
|
| 232 |
+
fuse_layers.append(nn.ModuleList(fuse_layer))
|
| 233 |
+
|
| 234 |
+
return nn.ModuleList(fuse_layers)
|
| 235 |
+
|
| 236 |
+
def get_num_inchannels(self):
|
| 237 |
+
return self.num_inchannels
|
| 238 |
+
|
| 239 |
+
def forward(self, x):
|
| 240 |
+
if self.num_branches == 1:
|
| 241 |
+
return [self.branches[0](x[0])]
|
| 242 |
+
|
| 243 |
+
for i in range(self.num_branches):
|
| 244 |
+
x[i] = self.branches[i](x[i])
|
| 245 |
+
|
| 246 |
+
x_fuse = []
|
| 247 |
+
for i in range(len(self.fuse_layers)):
|
| 248 |
+
y = x[0] if i == 0 else self.fuse_layers[i][0](x[0])
|
| 249 |
+
for j in range(1, self.num_branches):
|
| 250 |
+
if i == j:
|
| 251 |
+
y = y + x[j]
|
| 252 |
+
elif j > i:
|
| 253 |
+
y = y + F.interpolate(
|
| 254 |
+
self.fuse_layers[i][j](x[j]),
|
| 255 |
+
size=[x[i].shape[2], x[i].shape[3]],
|
| 256 |
+
mode='bilinear')
|
| 257 |
+
else:
|
| 258 |
+
y = y + self.fuse_layers[i][j](x[j])
|
| 259 |
+
x_fuse.append(self.relu(y))
|
| 260 |
+
|
| 261 |
+
return x_fuse
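# Fusion rule realised by _make_fuse_layers/forward above: for target branch i,
# lower-resolution inputs (j > i) go through a 1x1 conv + BN and are bilinearly
# upsampled to branch i's spatial size, higher-resolution inputs (j < i) are
# reduced by stride-2 3x3 convs, and the summed result is passed through ReLU.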
|
| 262 |
+
|
| 263 |
+
|
| 264 |
+
blocks_dict = {
|
| 265 |
+
'BASIC': BasicBlock,
|
| 266 |
+
'BOTTLENECK': Bottleneck
|
| 267 |
+
}
|
| 268 |
+
|
| 269 |
+
|
| 270 |
+
class HighResolutionNet(nn.Module):
|
| 271 |
+
|
| 272 |
+
def __init__(self, config, **kwargs):
|
| 273 |
+
self.inplanes = 64
|
| 274 |
+
extra = config['MODEL']['EXTRA']
|
| 275 |
+
super(HighResolutionNet, self).__init__()
|
| 276 |
+
|
| 277 |
+
# stem net
|
| 278 |
+
self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=3, stride=2, padding=1,
|
| 279 |
+
bias=False)
|
| 280 |
+
self.bn1 = BatchNorm2d(self.inplanes, momentum=BN_MOMENTUM)
|
| 281 |
+
self.conv2 = nn.Conv2d(self.inplanes, self.inplanes, kernel_size=3, stride=2, padding=1,
|
| 282 |
+
bias=False)
|
| 283 |
+
self.bn2 = BatchNorm2d(self.inplanes, momentum=BN_MOMENTUM)
|
| 284 |
+
self.relu = nn.ReLU(inplace=True)
|
| 285 |
+
self.sf = nn.Softmax(dim=1)
|
| 286 |
+
self.layer1 = self._make_layer(Bottleneck, 64, 64, 4)
|
| 287 |
+
|
| 288 |
+
self.stage2_cfg = extra['STAGE2']
|
| 289 |
+
num_channels = self.stage2_cfg['NUM_CHANNELS']
|
| 290 |
+
block = blocks_dict[self.stage2_cfg['BLOCK']]
|
| 291 |
+
num_channels = [
|
| 292 |
+
num_channels[i] * block.expansion for i in range(len(num_channels))]
|
| 293 |
+
self.transition1 = self._make_transition_layer(
|
| 294 |
+
[256], num_channels)
|
| 295 |
+
self.stage2, pre_stage_channels = self._make_stage(
|
| 296 |
+
self.stage2_cfg, num_channels)
|
| 297 |
+
|
| 298 |
+
self.stage3_cfg = extra['STAGE3']
|
| 299 |
+
num_channels = self.stage3_cfg['NUM_CHANNELS']
|
| 300 |
+
block = blocks_dict[self.stage3_cfg['BLOCK']]
|
| 301 |
+
num_channels = [
|
| 302 |
+
num_channels[i] * block.expansion for i in range(len(num_channels))]
|
| 303 |
+
self.transition2 = self._make_transition_layer(
|
| 304 |
+
pre_stage_channels, num_channels)
|
| 305 |
+
self.stage3, pre_stage_channels = self._make_stage(
|
| 306 |
+
self.stage3_cfg, num_channels)
|
| 307 |
+
|
| 308 |
+
self.stage4_cfg = extra['STAGE4']
|
| 309 |
+
num_channels = self.stage4_cfg['NUM_CHANNELS']
|
| 310 |
+
block = blocks_dict[self.stage4_cfg['BLOCK']]
|
| 311 |
+
num_channels = [
|
| 312 |
+
num_channels[i] * block.expansion for i in range(len(num_channels))]
|
| 313 |
+
self.transition3 = self._make_transition_layer(
|
| 314 |
+
pre_stage_channels, num_channels)
|
| 315 |
+
self.stage4, pre_stage_channels = self._make_stage(
|
| 316 |
+
self.stage4_cfg, num_channels, multi_scale_output=True)
|
| 317 |
+
|
| 318 |
+
self.upsample = nn.Upsample(scale_factor=2, mode='nearest')
|
| 319 |
+
final_inp_channels = sum(pre_stage_channels) + self.inplanes
|
| 320 |
+
|
| 321 |
+
self.head = nn.Sequential(nn.Sequential(
|
| 322 |
+
nn.Conv2d(
|
| 323 |
+
in_channels=final_inp_channels,
|
| 324 |
+
out_channels=final_inp_channels,
|
| 325 |
+
kernel_size=1),
|
| 326 |
+
BatchNorm2d(final_inp_channels, momentum=BN_MOMENTUM),
|
| 327 |
+
nn.ReLU(inplace=True),
|
| 328 |
+
nn.Conv2d(
|
| 329 |
+
in_channels=final_inp_channels,
|
| 330 |
+
out_channels=config['MODEL']['NUM_JOINTS'],
|
| 331 |
+
kernel_size=extra['FINAL_CONV_KERNEL']),
|
| 332 |
+
nn.Softmax(dim=1)))
|
| 333 |
+
|
| 334 |
+
|
| 335 |
+
|
| 336 |
+
def _make_head(self, x, x_skip):
|
| 337 |
+
x = self.upsample(x)
|
| 338 |
+
x = torch.cat([x, x_skip], dim=1)
|
| 339 |
+
x = self.head(x)
|
| 340 |
+
|
| 341 |
+
return x
|
| 342 |
+
|
| 343 |
+
def _make_transition_layer(
|
| 344 |
+
self, num_channels_pre_layer, num_channels_cur_layer):
|
| 345 |
+
num_branches_cur = len(num_channels_cur_layer)
|
| 346 |
+
num_branches_pre = len(num_channels_pre_layer)
|
| 347 |
+
|
| 348 |
+
transition_layers = []
|
| 349 |
+
for i in range(num_branches_cur):
|
| 350 |
+
if i < num_branches_pre:
|
| 351 |
+
if num_channels_cur_layer[i] != num_channels_pre_layer[i]:
|
| 352 |
+
transition_layers.append(nn.Sequential(
|
| 353 |
+
nn.Conv2d(num_channels_pre_layer[i],
|
| 354 |
+
num_channels_cur_layer[i],
|
| 355 |
+
3,
|
| 356 |
+
1,
|
| 357 |
+
1,
|
| 358 |
+
bias=False),
|
| 359 |
+
BatchNorm2d(
|
| 360 |
+
num_channels_cur_layer[i], momentum=BN_MOMENTUM),
|
| 361 |
+
nn.ReLU(inplace=True)))
|
| 362 |
+
else:
|
| 363 |
+
transition_layers.append(None)
|
| 364 |
+
else:
|
| 365 |
+
conv3x3s = []
|
| 366 |
+
for j in range(i + 1 - num_branches_pre):
|
| 367 |
+
inchannels = num_channels_pre_layer[-1]
|
| 368 |
+
outchannels = num_channels_cur_layer[i] \
|
| 369 |
+
if j == i - num_branches_pre else inchannels
|
| 370 |
+
conv3x3s.append(nn.Sequential(
|
| 371 |
+
nn.Conv2d(
|
| 372 |
+
inchannels, outchannels, 3, 2, 1, bias=False),
|
| 373 |
+
BatchNorm2d(outchannels, momentum=BN_MOMENTUM),
|
| 374 |
+
nn.ReLU(inplace=True)))
|
| 375 |
+
transition_layers.append(nn.Sequential(*conv3x3s))
|
| 376 |
+
|
| 377 |
+
return nn.ModuleList(transition_layers)
|
| 378 |
+
|
| 379 |
+
def _make_layer(self, block, inplanes, planes, blocks, stride=1):
|
| 380 |
+
downsample = None
|
| 381 |
+
if stride != 1 or inplanes != planes * block.expansion:
|
| 382 |
+
downsample = nn.Sequential(
|
| 383 |
+
nn.Conv2d(inplanes, planes * block.expansion,
|
| 384 |
+
kernel_size=1, stride=stride, bias=False),
|
| 385 |
+
BatchNorm2d(planes * block.expansion, momentum=BN_MOMENTUM),
|
| 386 |
+
)
|
| 387 |
+
|
| 388 |
+
layers = []
|
| 389 |
+
layers.append(block(inplanes, planes, stride, downsample))
|
| 390 |
+
inplanes = planes * block.expansion
|
| 391 |
+
for i in range(1, blocks):
|
| 392 |
+
layers.append(block(inplanes, planes))
|
| 393 |
+
|
| 394 |
+
return nn.Sequential(*layers)
|
| 395 |
+
|
| 396 |
+
def _make_stage(self, layer_config, num_inchannels,
|
| 397 |
+
multi_scale_output=True):
|
| 398 |
+
num_modules = layer_config['NUM_MODULES']
|
| 399 |
+
num_branches = layer_config['NUM_BRANCHES']
|
| 400 |
+
num_blocks = layer_config['NUM_BLOCKS']
|
| 401 |
+
num_channels = layer_config['NUM_CHANNELS']
|
| 402 |
+
block = blocks_dict[layer_config['BLOCK']]
|
| 403 |
+
fuse_method = layer_config['FUSE_METHOD']
|
| 404 |
+
|
| 405 |
+
modules = []
|
| 406 |
+
for i in range(num_modules):
|
| 407 |
+
# multi_scale_output is only used last module
|
| 408 |
+
if not multi_scale_output and i == num_modules - 1:
|
| 409 |
+
reset_multi_scale_output = False
|
| 410 |
+
else:
|
| 411 |
+
reset_multi_scale_output = True
|
| 412 |
+
modules.append(
|
| 413 |
+
HighResolutionModule(num_branches,
|
| 414 |
+
block,
|
| 415 |
+
num_blocks,
|
| 416 |
+
num_inchannels,
|
| 417 |
+
num_channels,
|
| 418 |
+
fuse_method,
|
| 419 |
+
reset_multi_scale_output)
|
| 420 |
+
)
|
| 421 |
+
num_inchannels = modules[-1].get_num_inchannels()
|
| 422 |
+
|
| 423 |
+
return nn.Sequential(*modules), num_inchannels
|
| 424 |
+
|
| 425 |
+
def forward(self, x):
|
| 426 |
+
# h, w = x.size(2), x.size(3)
|
| 427 |
+
x = self.conv1(x)
|
| 428 |
+
x_skip = x.clone()
|
| 429 |
+
x = self.bn1(x)
|
| 430 |
+
x = self.relu(x)
|
| 431 |
+
x = self.conv2(x)
|
| 432 |
+
x = self.bn2(x)
|
| 433 |
+
x = self.relu(x)
|
| 434 |
+
x = self.layer1(x)
|
| 435 |
+
|
| 436 |
+
x_list = []
|
| 437 |
+
for i in range(self.stage2_cfg['NUM_BRANCHES']):
|
| 438 |
+
if self.transition1[i] is not None:
|
| 439 |
+
x_list.append(self.transition1[i](x))
|
| 440 |
+
else:
|
| 441 |
+
x_list.append(x)
|
| 442 |
+
y_list = self.stage2(x_list)
|
| 443 |
+
|
| 444 |
+
x_list = []
|
| 445 |
+
for i in range(self.stage3_cfg['NUM_BRANCHES']):
|
| 446 |
+
if self.transition2[i] is not None:
|
| 447 |
+
x_list.append(self.transition2[i](y_list[-1]))
|
| 448 |
+
else:
|
| 449 |
+
x_list.append(y_list[i])
|
| 450 |
+
y_list = self.stage3(x_list)
|
| 451 |
+
|
| 452 |
+
x_list = []
|
| 453 |
+
for i in range(self.stage4_cfg['NUM_BRANCHES']):
|
| 454 |
+
if self.transition3[i] is not None:
|
| 455 |
+
x_list.append(self.transition3[i](y_list[-1]))
|
| 456 |
+
else:
|
| 457 |
+
x_list.append(y_list[i])
|
| 458 |
+
x = self.stage4(x_list)
|
| 459 |
+
|
| 460 |
+
# Head Part
|
| 461 |
+
height, width = x[0].size(2), x[0].size(3)
|
| 462 |
+
x1 = F.interpolate(x[1], size=(height, width), mode='bilinear', align_corners=False)
|
| 463 |
+
x2 = F.interpolate(x[2], size=(height, width), mode='bilinear', align_corners=False)
|
| 464 |
+
x3 = F.interpolate(x[3], size=(height, width), mode='bilinear', align_corners=False)
|
| 465 |
+
x = torch.cat([x[0], x1, x2, x3], 1)
|
| 466 |
+
x = self._make_head(x, x_skip)
|
| 467 |
+
|
| 468 |
+
return x
|
| 469 |
+
|
| 470 |
+
def init_weights(self, pretrained=''):
|
| 471 |
+
for m in self.modules():
|
| 472 |
+
if isinstance(m, nn.Conv2d):
|
| 473 |
+
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
|
| 474 |
+
#nn.init.normal_(m.weight, std=0.001)
|
| 475 |
+
#nn.init.constant_(m.bias, 0)
|
| 476 |
+
elif isinstance(m, nn.BatchNorm2d):
|
| 477 |
+
nn.init.constant_(m.weight, 1)
|
| 478 |
+
nn.init.constant_(m.bias, 0)
|
| 479 |
+
if pretrained != '':
|
| 480 |
+
if os.path.isfile(pretrained):
|
| 481 |
+
pretrained_dict = torch.load(pretrained)
|
| 482 |
+
model_dict = self.state_dict()
|
| 483 |
+
pretrained_dict = {k: v for k, v in pretrained_dict.items()
|
| 484 |
+
if k in model_dict.keys()}
|
| 485 |
+
model_dict.update(pretrained_dict)
|
| 486 |
+
self.load_state_dict(model_dict)
|
| 487 |
+
else:
|
| 488 |
+
sys.exit(f'Weights {pretrained} not found.')
|
| 489 |
+
|
| 490 |
+
|
| 491 |
+
def get_cls_net(config, pretrained='', **kwargs):
|
| 492 |
+
"""Create keypoint detection model with softmax activation"""
|
| 493 |
+
model = HighResolutionNet(config, **kwargs)
|
| 494 |
+
model.init_weights(pretrained)
|
| 495 |
+
return model
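# Minimal usage sketch. The config is assumed to come from the repo's
# hrnetv2_w48.yaml and to contain the MODEL/EXTRA keys read by
# HighResolutionNet; the weight path is a placeholder, not a file name
# confirmed by this repo.
#   import yaml
#   with open('hrnetv2_w48.yaml') as f:
#       cfg = yaml.safe_load(f)
#   model = get_cls_net(cfg, pretrained='path/to/hrnet_keypoint_weights.pth')
#   model = model.eval().to('cuda')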
|
| 496 |
+
|
| 497 |
+
|
| 498 |
+
def get_cls_net_l(config, pretrained='', **kwargs):
|
| 499 |
+
"""Create line detection model with sigmoid activation"""
|
| 500 |
+
model = HighResolutionNet(config, **kwargs)
|
| 501 |
+
model.init_weights(pretrained)
|
| 502 |
+
|
| 503 |
+
# After loading weights, replace just the activation function
|
| 504 |
+
# The saved model expects the nested Sequential structure
|
| 505 |
+
inner_seq = model.head[0]
|
| 506 |
+
# Replace softmax (index 4) with sigmoid
|
| 507 |
+
model.head[0][4] = nn.Sigmoid()
|
| 508 |
+
|
| 509 |
+
return model
|
| 510 |
+
|
| 511 |
+
# Simplified utility functions - removed complex Gaussian generation functions
|
| 512 |
+
# These were mainly used for training data generation, not inference
|
| 513 |
+
|
| 514 |
+
|
| 515 |
+
|
| 516 |
+
# generate_gaussian_array_vectorized_dist_l function removed - not used in current implementation
|
| 517 |
+
@torch.inference_mode()
|
| 518 |
+
def run_inference(model, input_tensor: torch.Tensor, device):
|
| 519 |
+
input_tensor = input_tensor.to(device).to(memory_format=torch.channels_last)
|
| 520 |
+
output = model.module().forward(input_tensor)
|
| 521 |
+
return output
|
| 522 |
+
|
| 523 |
+
def preprocess_batch_fast(frames):
|
| 524 |
+
"""Ultra-fast batch preprocessing using optimized tensor operations"""
|
| 525 |
+
target_size = (540, 960) # H, W format for model input
|
| 526 |
+
batch = []
|
| 527 |
+
for i, frame in enumerate(frames):
|
| 528 |
+
frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
|
| 529 |
+
img = cv2.resize(frame_rgb, (target_size[1], target_size[0]))
|
| 530 |
+
img = img.astype(np.float32) / 255.0
|
| 531 |
+
img = np.transpose(img, (2, 0, 1)) # HWC -> CHW
|
| 532 |
+
batch.append(img)
|
| 533 |
+
batch = torch.from_numpy(np.stack(batch)).float()
|
| 534 |
+
|
| 535 |
+
return batch
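# Result of the preprocessing above: a float32 tensor of shape (N, 3, 540, 960)
# in RGB channel order with values scaled to [0, 1]; callers move it to the
# model's device before the forward pass (see inference_batch below).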
|
| 536 |
+
|
| 537 |
+
def extract_keypoints_from_heatmap(heatmap: torch.Tensor, scale: int = 2, max_keypoints: int = 1):
|
| 538 |
+
"""Optimized keypoint extraction from heatmaps"""
|
| 539 |
+
batch_size, n_channels, height, width = heatmap.shape
|
| 540 |
+
|
| 541 |
+
# Find local maxima using max pooling (keep on GPU)
|
| 542 |
+
kernel = 3
|
| 543 |
+
pad = 1
|
| 544 |
+
max_pooled = F.max_pool2d(heatmap, kernel, stride=1, padding=pad)
|
| 545 |
+
local_maxima = (max_pooled == heatmap)
|
| 546 |
+
heatmap = heatmap * local_maxima
|
| 547 |
+
|
| 548 |
+
# Get top keypoints (keep on GPU longer)
|
| 549 |
+
scores, indices = torch.topk(heatmap.view(batch_size, n_channels, -1), max_keypoints, sorted=False)
|
| 550 |
+
y_coords = torch.div(indices, width, rounding_mode="floor")
|
| 551 |
+
x_coords = indices % width
|
| 552 |
+
|
| 553 |
+
# Optimized tensor operations
|
| 554 |
+
x_coords = x_coords * scale
|
| 555 |
+
y_coords = y_coords * scale
|
| 556 |
+
|
| 557 |
+
# Create result tensor directly on GPU
|
| 558 |
+
results = torch.stack([x_coords.float(), y_coords.float(), scores], dim=-1)
|
| 559 |
+
|
| 560 |
+
return results
|
| 561 |
+
|
| 562 |
+
|
| 563 |
+
def extract_keypoints_from_heatmap_fast(heatmap: torch.Tensor, scale: int = 2, max_keypoints: int = 1):
|
| 564 |
+
"""Ultra-fast keypoint extraction optimized for speed"""
|
| 565 |
+
batch_size, n_channels, height, width = heatmap.shape
|
| 566 |
+
|
| 567 |
+
# Simplified local maxima detection (faster but slightly less accurate)
|
| 568 |
+
max_pooled = F.max_pool2d(heatmap, 3, stride=1, padding=1)
|
| 569 |
+
local_maxima = (max_pooled == heatmap)
|
| 570 |
+
|
| 571 |
+
# Apply mask and get top keypoints in one go
|
| 572 |
+
masked_heatmap = heatmap * local_maxima
|
| 573 |
+
flat_heatmap = masked_heatmap.view(batch_size, n_channels, -1)
|
| 574 |
+
scores, indices = torch.topk(flat_heatmap, max_keypoints, dim=-1, sorted=False)
|
| 575 |
+
|
| 576 |
+
# Vectorized coordinate calculation
|
| 577 |
+
y_coords = torch.div(indices, width, rounding_mode="floor") * scale
|
| 578 |
+
x_coords = (indices % width) * scale
|
| 579 |
+
|
| 580 |
+
# Stack results efficiently
|
| 581 |
+
results = torch.stack([x_coords.float(), y_coords.float(), scores], dim=-1)
|
| 582 |
+
return results
|
| 583 |
+
|
| 584 |
+
|
| 585 |
+
def process_keypoints_vectorized(kp_coords, kp_threshold, w, h, batch_size):
|
| 586 |
+
"""Ultra-fast vectorized keypoint processing"""
|
| 587 |
+
batch_results = []
|
| 588 |
+
|
| 589 |
+
# Convert to numpy once for faster CPU operations
|
| 590 |
+
kp_np = kp_coords.cpu().numpy()
|
| 591 |
+
|
| 592 |
+
for batch_idx in range(batch_size):
|
| 593 |
+
kp_dict = {}
|
| 594 |
+
# Vectorized threshold check
|
| 595 |
+
valid_kps = kp_np[batch_idx, :, 0, 2] > kp_threshold
|
| 596 |
+
valid_indices = np.where(valid_kps)[0]
|
| 597 |
+
|
| 598 |
+
for ch_idx in valid_indices:
|
| 599 |
+
x = float(kp_np[batch_idx, ch_idx, 0, 0]) / w
|
| 600 |
+
y = float(kp_np[batch_idx, ch_idx, 0, 1]) / h
|
| 601 |
+
p = float(kp_np[batch_idx, ch_idx, 0, 2])
|
| 602 |
+
kp_dict[ch_idx + 1] = {'x': x, 'y': y, 'p': p}
|
| 603 |
+
|
| 604 |
+
batch_results.append(kp_dict)
|
| 605 |
+
|
| 606 |
+
return batch_results
|
| 607 |
+
|
| 608 |
+
def inference_batch(frames, model, kp_threshold, device, batch_size=8):
|
| 609 |
+
"""Optimized batch inference for multiple frames"""
|
| 610 |
+
results = []
|
| 611 |
+
num_frames = len(frames)
|
| 612 |
+
|
| 613 |
+
# Get the device from the model itself
|
| 614 |
+
model_device = next(model.parameters()).device
|
| 615 |
+
|
| 616 |
+
# Process all frames in optimally-sized batches
|
| 617 |
+
for i in range(0, num_frames, batch_size):
|
| 618 |
+
current_batch_size = min(batch_size, num_frames - i)
|
| 619 |
+
batch_frames = frames[i:i + current_batch_size]
|
| 620 |
+
|
| 621 |
+
# Fast preprocessing - create on CPU first
|
| 622 |
+
batch = preprocess_batch_fast(batch_frames)
|
| 623 |
+
b, c, h, w = batch.size()
|
| 624 |
+
|
| 625 |
+
# Move batch to model device
|
| 626 |
+
batch = batch.to(model_device)
|
| 627 |
+
|
| 628 |
+
with torch.no_grad():
|
| 629 |
+
heatmaps = model(batch)
|
| 630 |
+
|
| 631 |
+
# Ultra-fast keypoint extraction
|
| 632 |
+
kp_coords = extract_keypoints_from_heatmap_fast(heatmaps[:,:-1,:,:], scale=2, max_keypoints=1)
|
| 633 |
+
|
| 634 |
+
# Vectorized batch processing - no loops
|
| 635 |
+
batch_results = process_keypoints_vectorized(kp_coords, kp_threshold, 960, 540, current_batch_size)
|
| 636 |
+
results.extend(batch_results)
|
| 637 |
+
|
| 638 |
+
# Minimal cleanup
|
| 639 |
+
del heatmaps, kp_coords, batch
|
| 640 |
+
|
| 641 |
+
return results
|
| 642 |
+
|
| 643 |
+
# Keypoint mapping from detection indices to standard football pitch keypoint IDs
|
| 644 |
+
map_keypoints = {
|
| 645 |
+
1: 1, 2: 14, 3: 25, 4: 2, 5: 10, 6: 18, 7: 26, 8: 3, 9: 7, 10: 23,
|
| 646 |
+
11: 27, 20: 4, 21: 8, 22: 24, 23: 28, 24: 5, 25: 13, 26: 21, 27: 29,
|
| 647 |
+
28: 6, 29: 17, 30: 30, 31: 11, 32: 15, 33: 19, 34: 12, 35: 16, 36: 20,
|
| 648 |
+
45: 9, 50: 31, 52: 32, 57: 22
|
| 649 |
+
}
|
| 650 |
+
|
| 651 |
+
def get_mapped_keypoints(kp_points):
|
| 652 |
+
"""Apply keypoint mapping to detection results"""
|
| 653 |
+
mapped_points = {}
|
| 654 |
+
for key, value in kp_points.items():
|
| 655 |
+
if key in map_keypoints:
|
| 656 |
+
mapped_key = map_keypoints[key]
|
| 657 |
+
mapped_points[mapped_key] = value
|
| 658 |
+
# else:
|
| 659 |
+
# Keep unmapped keypoints with original key
|
| 660 |
+
# mapped_points[key] = value
|
| 661 |
+
return mapped_points
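
# Illustrative example (added for clarity, not part of the original file): the mapping
# renames model channel indices to standard pitch keypoint IDs and silently drops
# channels that have no entry in map_keypoints. The values below are made up.
#
#   raw = {45: {'x': 0.52, 'y': 0.48, 'p': 0.91},   # channel 45 -> pitch keypoint 9
#          12: {'x': 0.10, 'y': 0.20, 'p': 0.80}}   # channel 12 has no mapping -> dropped
#   get_mapped_keypoints(raw)
#   # -> {9: {'x': 0.52, 'y': 0.48, 'p': 0.91}}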

def process_batch_input(frames, model, kp_threshold, device, batch_size=8):
    """Process multiple input images in batch"""
    # Batch inference
    kp_results = inference_batch(frames, model, kp_threshold, device, batch_size)
    kp_results = [get_mapped_keypoints(kp) for kp in kp_results]

    # Draw results and save
    # for i, (frame, kp_points, input_path) in enumerate(zip(frames, kp_results, valid_paths)):
    #     height, width = frame.shape[:2]
    #
    #     # Apply mapping to get standard keypoint IDs
    #     mapped_kp_points = get_mapped_keypoints(kp_points)
    #
    #     for key, value in mapped_kp_points.items():
    #         x = int(value['x'] * width)
    #         y = int(value['y'] * height)
    #         cv2.circle(frame, (x, y), 5, (0, 255, 0), -1)  # Green circles
    #         cv2.putText(frame, str(key), (x + 10, y), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)
    #
    #     # Save result
    #     output_path = input_path.replace('.png', '_result.png').replace('.jpg', '_result.jpg')
    #     cv2.imwrite(output_path, frame)
    #
    # print(f"Batch processing complete. Processed {len(frames)} images.")

    return kp_results
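
A minimal usage sketch for the batched keypoint pipeline above (added for illustration, not part of the upload). The config and weight filenames come from this repo's file list, but the way the HRNet config is loaded, the kp_threshold value, and the frame paths are assumptions:

import cv2
import torch
import yaml

device = 'cuda' if torch.cuda.is_available() else 'cpu'

with open('hrnetv2_w48.yaml') as f:
    cfg = yaml.safe_load(f)              # assumed to be in the format HighResolutionNet expects

model = get_cls_net(cfg, pretrained='keypoint.pt')   # weight filename is an assumption
model.to(device).eval()

frames = [cv2.imread(p) for p in ('frame_0001.jpg', 'frame_0002.jpg')]   # placeholder paths
keypoints_per_frame = process_batch_input(frames, model, kp_threshold=0.5,
                                          device=device, batch_size=8)
# Each entry is a dict {pitch_keypoint_id: {'x': ..., 'y': ..., 'p': ...}},
# with x and y normalised by the 960x540 model input size.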
player.pt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:ce9fc31f61e6f156f786077abb8eef36b0836bda1ef07d1d0ba82d43ae0ecd0b
size 22540152
player.py
ADDED
@@ -0,0 +1,389 @@
import cv2
import numpy as np
from sklearn.cluster import KMeans
import warnings
import time

import torch
from torchvision.ops import batched_nms
from numpy import ndarray
# Suppress ALL runtime and sklearn warnings
warnings.filterwarnings('ignore', category=RuntimeWarning)
warnings.filterwarnings('ignore', category=FutureWarning)
warnings.filterwarnings('ignore', category=UserWarning)

# Suppress sklearn warnings specifically
import logging
logging.getLogger('sklearn').setLevel(logging.ERROR)

def get_grass_color(img):
    # Convert image to HSV color space
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)

    # Define range of green color in HSV
    lower_green = np.array([30, 40, 40])
    upper_green = np.array([80, 255, 255])

    # Threshold the HSV image to get only green colors
    mask = cv2.inRange(hsv, lower_green, upper_green)

    # Calculate the mean value of the pixels that are not masked
    masked_img = cv2.bitwise_and(img, img, mask=mask)
    grass_color = cv2.mean(img, mask=mask)
    return grass_color[:3]

def get_players_boxes(frame, result):
    players_imgs = []
    players_boxes = []
    for (box, score, cls) in result:
        label = int(cls)
        if label == 0:
            x1, y1, x2, y2 = box.astype(int)
            player_img = frame[y1: y2, x1: x2]
            players_imgs.append(player_img)
            players_boxes.append([box, score, cls])
    return players_imgs, players_boxes

def get_kits_colors(players, grass_hsv=None, frame=None):
    kits_colors = []
    if grass_hsv is None:
        grass_color = get_grass_color(frame)
        grass_hsv = cv2.cvtColor(np.uint8([[list(grass_color)]]), cv2.COLOR_BGR2HSV)

    for player_img in players:
        # Skip empty or invalid images
        if player_img is None or player_img.size == 0 or len(player_img.shape) != 3:
            continue

        # Convert image to HSV color space
        hsv = cv2.cvtColor(player_img, cv2.COLOR_BGR2HSV)

        # Define range of green color in HSV
        lower_green = np.array([grass_hsv[0, 0, 0] - 10, 40, 40])
        upper_green = np.array([grass_hsv[0, 0, 0] + 10, 255, 255])

        # Threshold the HSV image to get only green colors
        mask = cv2.inRange(hsv, lower_green, upper_green)

        # Bitwise-AND mask and original image
        mask = cv2.bitwise_not(mask)
        upper_mask = np.zeros(player_img.shape[:2], np.uint8)
        upper_mask[0:player_img.shape[0]//2, 0:player_img.shape[1]] = 255
        mask = cv2.bitwise_and(mask, upper_mask)

        kit_color = np.array(cv2.mean(player_img, mask=mask)[:3])

        kits_colors.append(kit_color)
    return kits_colors

def get_kits_classifier(kits_colors):
    if len(kits_colors) == 0:
        return None
    if len(kits_colors) == 1:
        # Only one kit color, create a dummy classifier
        return None
    kits_kmeans = KMeans(n_clusters=2)
    kits_kmeans.fit(kits_colors)
    return kits_kmeans

def classify_kits(kits_classifer, kits_colors):
    if kits_classifer is None or len(kits_colors) == 0:
        return np.array([0])  # Default to team 0
    team = kits_classifer.predict(kits_colors)
    return team

def get_left_team_label(players_boxes, kits_colors, kits_clf):
    left_team_label = 0
    team_0 = []
    team_1 = []

    for i in range(len(players_boxes)):
        x1, y1, x2, y2 = players_boxes[i][0].astype(int)
        team = classify_kits(kits_clf, [kits_colors[i]]).item()
        if team == 0:
            team_0.append(np.array([x1]))
        else:
            team_1.append(np.array([x1]))

    team_0 = np.array(team_0)
    team_1 = np.array(team_1)

    # Safely calculate averages with fallback for empty arrays
    avg_team_0 = np.average(team_0) if len(team_0) > 0 else 0
    avg_team_1 = np.average(team_1) if len(team_1) > 0 else 0

    if avg_team_0 - avg_team_1 > 0:
        left_team_label = 1

    return left_team_label

def check_box_boundaries(boxes, img_height, img_width):
    """
    Check if bounding boxes are within image boundaries and clip them if necessary.

    Args:
        boxes: numpy array of shape (N, 4) with [x1, y1, x2, y2] format
        img_height: height of the image
        img_width: width of the image

    Returns:
        valid_boxes: numpy array of valid boxes within boundaries
        valid_indices: indices of valid boxes
    """
    x1, y1, x2, y2 = boxes[:, 0], boxes[:, 1], boxes[:, 2], boxes[:, 3]

    # Check if boxes are within boundaries
    valid_mask = (x1 >= 0) & (y1 >= 0) & (x2 < img_width) & (y2 < img_height) & (x1 < x2) & (y1 < y2)

    if not np.any(valid_mask):
        return np.array([]), np.array([])

    valid_boxes = boxes[valid_mask]
    valid_indices = np.where(valid_mask)[0]

    # Clip boxes to image boundaries
    valid_boxes[:, 0] = np.clip(valid_boxes[:, 0], 0, img_width - 1)   # x1
    valid_boxes[:, 1] = np.clip(valid_boxes[:, 1], 0, img_height - 1)  # y1
    valid_boxes[:, 2] = np.clip(valid_boxes[:, 2], 0, img_width - 1)   # x2
    valid_boxes[:, 3] = np.clip(valid_boxes[:, 3], 0, img_height - 1)  # y2

    return valid_boxes, valid_indices

def process_team_identification_batch(frames, results, kits_clf, left_team_label, grass_hsv):
    """
    Process team identification and label formatting for batch results.

    Args:
        frames: list of frames
        results: list of detection results for each frame
        kits_clf: trained kit classifier
        left_team_label: label for left team
        grass_hsv: grass color in HSV format

    Returns:
        processed_results: list of processed results with team identification
    """
    processed_results = []

    for frame_idx, frame in enumerate(frames):
        frame_results = []
        frame_detections = results[frame_idx]

        if not frame_detections:
            processed_results.append([])
            continue

        # Extract player boxes and images
        players_imgs = []
        players_boxes = []
        player_indices = []

        for idx, (box, score, cls) in enumerate(frame_detections):
            label = int(cls)
            if label == 0:  # Player detection
                x1, y1, x2, y2 = box.astype(int)

                # Check boundaries
                if (x1 >= 0 and y1 >= 0 and x2 < frame.shape[1] and y2 < frame.shape[0] and x1 < x2 and y1 < y2):
                    player_img = frame[y1:y2, x1:x2]
                    if player_img.size > 0:  # Ensure valid image
                        players_imgs.append(player_img)
                        players_boxes.append([box, score, cls])
                        player_indices.append(idx)

        # Initialize player team mapping
        player_team_map = {}

        # Process team identification if we have players
        if players_imgs and kits_clf is not None:
            kits_colors = get_kits_colors(players_imgs, grass_hsv)
            teams = classify_kits(kits_clf, kits_colors)

            # Create mapping from player index to team
            for i, team in enumerate(teams):
                player_team_map[player_indices[i]] = team.item()

        id = 0
        # Process all detections with team identification
        for idx, (box, score, cls) in enumerate(frame_detections):
            label = int(cls)
            x1, y1, x2, y2 = box.astype(int)

            # Check boundaries
            valid_boxes, valid_indices = check_box_boundaries(
                np.array([[x1, y1, x2, y2]]), frame.shape[0], frame.shape[1]
            )

            if len(valid_boxes) == 0:
                continue

            x1, y1, x2, y2 = valid_boxes[0].astype(int)

            # Apply team identification logic
            if label == 0:  # Player
                if players_imgs and kits_clf is not None and idx in player_team_map:
                    team = player_team_map[idx]
                    if team == left_team_label:
                        final_label = 6  # Player-L (Left team)
                    else:
                        final_label = 7  # Player-R (Right team)
                else:
                    final_label = 6  # Default player label

            elif label == 1:  # Goalkeeper
                final_label = 1  # GK

            elif label == 2:  # Ball
                final_label = 0  # Ball

            elif label == 3 or label == 4:  # Referee or other
                final_label = 3  # Referee

            else:
                continue
                # final_label = int(label)  # Keep original label, ensure it's int

            frame_results.append({
                "id": int(id),
                "bbox": [int(x1), int(y1), int(x2), int(y2)],
                "class_id": int(final_label),
                "conf": float(score)
            })
            id = id + 1

        processed_results.append(frame_results)

    return processed_results

def convert_numpy_types(obj):
    """Convert numpy types to native Python types for JSON serialization."""
    if isinstance(obj, np.integer):
        return int(obj)
    elif isinstance(obj, np.floating):
        return float(obj)
    elif isinstance(obj, np.ndarray):
        return obj.tolist()
    elif isinstance(obj, dict):
        return {key: convert_numpy_types(value) for key, value in obj.items()}
    elif isinstance(obj, list):
        return [convert_numpy_types(item) for item in obj]
    else:
        return obj

def pre_process_img(frames, scale):
    imgs = np.stack([cv2.resize(frame, (int(scale), int(scale))) for frame in frames])
    imgs = imgs.transpose(0, 3, 1, 2)
    imgs = imgs.astype(np.float32) / 255.0  # Normalize
    return imgs

def post_process_output(outputs, x_scale, y_scale, conf_thresh=0.6, nms_thresh=0.75):
    B, C, N = outputs.shape
    outputs = torch.from_numpy(outputs)
    outputs = outputs.permute(0, 2, 1)
    boxes = outputs[..., :4]
    class_scores = 1 / (1 + torch.exp(-outputs[..., 4:]))
    conf, class_id = class_scores.max(dim=2)

    mask = conf > conf_thresh

    for i in range(class_id.shape[0]):  # loop over batch
        # Find detections that are balls
        ball_idx = np.where(class_id[i] == 2)[0]
        if ball_idx.size > 0:
            # Pick the one with the highest confidence
            top = ball_idx[np.argmax(conf[i, ball_idx])]
            if conf[i, top] > 0.55:  # apply confidence threshold
                mask[i, top] = True

    # ball_mask = (class_id == 2) & (conf > 0.51)
    # mask = mask | ball_mask

    batch_idx, pred_idx = mask.nonzero(as_tuple=True)

    if len(batch_idx) == 0:
        return [[] for _ in range(B)]

    boxes = boxes[batch_idx, pred_idx]
    conf = conf[batch_idx, pred_idx]
    class_id = class_id[batch_idx, pred_idx]

    x, y, w, h = boxes[:, 0], boxes[:, 1], boxes[:, 2], boxes[:, 3]
    x1 = (x - w / 2) * x_scale
    y1 = (y - h / 2) * y_scale
    x2 = (x + w / 2) * x_scale
    y2 = (y + h / 2) * y_scale
    boxes_xyxy = torch.stack([x1, y1, x2, y2], dim=1)

    max_coord = 1e4
    offset = batch_idx.to(boxes_xyxy) * max_coord
    boxes_for_nms = boxes_xyxy + offset[:, None]

    keep = batched_nms(boxes_for_nms, conf, batch_idx, nms_thresh)

    boxes_final = boxes_xyxy[keep]
    conf_final = conf[keep]
    class_final = class_id[keep]
    batch_final = batch_idx[keep]

    results = [[] for _ in range(B)]
    for b in range(B):
        mask_b = batch_final == b
        if mask_b.sum() == 0:
            continue
        results[b] = list(zip(boxes_final[mask_b].numpy(),
                              conf_final[mask_b].numpy(),
                              class_final[mask_b].numpy()))
    return results
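
# Illustrative note (added for clarity, not part of the original file): post_process_output
# expects a YOLO-style raw output of shape (B, 4 + num_classes, N), where rows 0-3 hold
# (cx, cy, w, h) in the 640x640 input space and the remaining rows are per-class logits
# (hence the sigmoid above). Example of the rescaling for a 1920x1080 frame:
#   x_scale = 1920 / 640 = 3.0, y_scale = 1080 / 640 = 1.6875
#   a raw box (cx=320, cy=320, w=64, h=128) decodes to
#   x1 = (320 - 32) * 3.0 = 864.0,  y1 = (320 - 64) * 1.6875 = 432.0,
#   x2 = (320 + 32) * 3.0 = 1056.0, y2 = (320 + 64) * 1.6875 = 648.0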

def player_detection_result(frames: list[ndarray], batch_size, model, kits_clf=None, left_team_label=None, grass_hsv=None):
    start_time = time.time()
    # input_layer = model.input(0)
    # output_layer = model.output(0)
    height, width = frames[0].shape[:2]
    scale = 640.0
    x_scale = width / scale
    y_scale = height / scale

    # infer_queue = AsyncInferQueue(model, len(frames))

    infer_time = time.time()
    kits_clf = kits_clf
    left_team_label = left_team_label
    grass_hsv = grass_hsv
    results = []
    for i in range(0, len(frames), batch_size):
        if i + batch_size > len(frames):
            batch_size = len(frames) - i
        batch_frames = frames[i:i + batch_size]
        imgs = pre_process_img(batch_frames, scale)

        input_name = model.get_inputs()[0].name
        outputs = model.run(None, {input_name: imgs})[0]
        raw_results = post_process_output(np.array(outputs), x_scale, y_scale)

        if kits_clf is None or left_team_label is None or grass_hsv is None:
            # Use first frame to initialize team classification
            first_frame = batch_frames[0]
            first_frame_results = raw_results[0] if raw_results else []

            if first_frame_results:
                players_imgs, players_boxes = get_players_boxes(first_frame, first_frame_results)
                if players_imgs:
                    grass_color = get_grass_color(first_frame)
                    grass_hsv = cv2.cvtColor(np.uint8([[list(grass_color)]]), cv2.COLOR_BGR2HSV)
                    kits_colors = get_kits_colors(players_imgs, grass_hsv)
                    if kits_colors:  # Only proceed if we have valid kit colors
                        kits_clf = get_kits_classifier(kits_colors)
                        if kits_clf is not None:
                            left_team_label = int(get_left_team_label(players_boxes, kits_colors, kits_clf))

        # Process team identification and boundary checking
        processed_results = process_team_identification_batch(
            batch_frames, raw_results, kits_clf, left_team_label, grass_hsv
        )

        processed_results = convert_numpy_types(processed_results)
        results.extend(processed_results)

    # Return the same format as before for compatibility
    return results, kits_clf, left_team_label, grass_hsv
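
A minimal usage sketch for player_detection_result (added for illustration, not part of the upload). The function calls model.get_inputs() and model.run(), i.e. it expects an ONNX Runtime style session; the 'detection.onnx' filename comes from this repo's file list, while the video path and frame count are placeholders:

import cv2
import onnxruntime as ort

session = ort.InferenceSession('detection.onnx', providers=['CPUExecutionProvider'])

cap = cv2.VideoCapture('match_clip.mp4')      # placeholder path
frames = []
while len(frames) < 16:
    ok, frame = cap.read()
    if not ok:
        break
    frames.append(frame)
cap.release()

detections, kits_clf, left_team_label, grass_hsv = player_detection_result(
    frames, batch_size=8, model=session)
# `detections` holds one list per frame of dicts:
#   {"id": ..., "bbox": [x1, y1, x2, y2], "class_id": ..., "conf": ...}
# where class_id follows the mapping above: 0 ball, 1 goalkeeper, 3 referee,
# 6 left-team player, 7 right-team player.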
team_cluster.pyc
ADDED
Binary file (7.62 kB).