diff --git a/data/occurrences/11_01_23-DJI_0977.csv b/data/occurrences/11_01_23-DJI_0977.csv new file mode 100644 index 0000000000000000000000000000000000000000..159202a268c82e3daa7d152a149263d00670aeee --- /dev/null +++ b/data/occurrences/11_01_23-DJI_0977.csv @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:50b50f60dc3c52da042a23ab882e74945849d8e1e759cef1e4a43fd96c1bf35f +size 14345580 diff --git a/data/occurrences/11_01_23-DJI_0978.csv b/data/occurrences/11_01_23-DJI_0978.csv new file mode 100644 index 0000000000000000000000000000000000000000..a030b70abe9a04936c1b81edfe2604254478487e --- /dev/null +++ b/data/occurrences/11_01_23-DJI_0978.csv @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:34d62178f47214ff697be175d4932d76457cecb14de168f95b0fc65736757779 +size 4707447 diff --git a/data/occurrences/11_01_23-DJI_0979.csv b/data/occurrences/11_01_23-DJI_0979.csv new file mode 100644 index 0000000000000000000000000000000000000000..876454d5b8702005507a18066861d4442a123d15 --- /dev/null +++ b/data/occurrences/11_01_23-DJI_0979.csv @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:76f56cfffbd31ec514a81891026ea75e7fccd095d3350f8072c2a40d55cea9e5 +size 5069349 diff --git a/data/occurrences/11_01_23-DJI_0980.csv b/data/occurrences/11_01_23-DJI_0980.csv new file mode 100644 index 0000000000000000000000000000000000000000..e9db46c502d445fc126f9e2feab44b2fa90dec2f --- /dev/null +++ b/data/occurrences/11_01_23-DJI_0980.csv @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:828e26b2bd31afee01c80c6293d92761473244909c1730bdf0cd816f4826d380 +size 17952129 diff --git a/data/occurrences/12_01_23-DJI_0001.csv b/data/occurrences/12_01_23-DJI_0001.csv new file mode 100644 index 0000000000000000000000000000000000000000..f6e304a0d3e6854a366d6c82d18968c836e37c7c --- /dev/null +++ b/data/occurrences/12_01_23-DJI_0001.csv @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:063b1d2607a004dc420eb8dc3b7931573ba0125629e3c2110198cfaf45a757d4 +size 72939976 diff --git a/data/occurrences/12_01_23-DJI_0002.csv b/data/occurrences/12_01_23-DJI_0002.csv new file mode 100644 index 0000000000000000000000000000000000000000..d16769babebba8091d2abe65f6ba10c08754192d --- /dev/null +++ b/data/occurrences/12_01_23-DJI_0002.csv @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:61698a88cccdea95c9dad90209beac8960673f7cf26376efddc59ff129eb14de +size 15943851 diff --git a/data/occurrences/12_01_23-DJI_0003.csv b/data/occurrences/12_01_23-DJI_0003.csv new file mode 100644 index 0000000000000000000000000000000000000000..eaee3d6ba2380800fad708e3f059e37773fa7b3e --- /dev/null +++ b/data/occurrences/12_01_23-DJI_0003.csv @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:473cf4b2158212bfb07a30e2e041f999b961fc965e9a6a74a9563bbbe09649f2 +size 393014 diff --git a/data/occurrences/12_01_23-DJI_0006.csv b/data/occurrences/12_01_23-DJI_0006.csv new file mode 100644 index 0000000000000000000000000000000000000000..09ea64932b71dcb80e1bb7aa9431e0281def22cd --- /dev/null +++ b/data/occurrences/12_01_23-DJI_0006.csv @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:24e9f7957961bbb119652b31fde8c00c97781cab02381d4a943f5bd23efdfb9a +size 13992446 diff --git a/data/occurrences/12_01_23-DJI_0007.csv b/data/occurrences/12_01_23-DJI_0007.csv new file mode 100644 index 0000000000000000000000000000000000000000..526e6537876a37f1f32add48995b26ad9a4c6345 --- /dev/null +++ b/data/occurrences/12_01_23-DJI_0007.csv @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d2d6c94bd52278800a2885797cb328e824c4ec8b901c09141e1f0458beb8f3ea +size 23336478 diff --git a/data/occurrences/12_01_23-DJI_0008.csv b/data/occurrences/12_01_23-DJI_0008.csv new file mode 100644 index 0000000000000000000000000000000000000000..ef3da5c55aabfd91e310c67cc563434e1c1d9ada --- /dev/null +++ 
b/data/occurrences/12_01_23-DJI_0008.csv @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:962c8c1f1feeb6703cb73230b800e5fac262a45047ebb73ce5248921562a8a61 +size 10031966 diff --git a/data/occurrences/12_01_23-DJI_0987.csv b/data/occurrences/12_01_23-DJI_0987.csv new file mode 100644 index 0000000000000000000000000000000000000000..19d92f2bd1805c2dc359978ca0b2999bcd3c8c94 --- /dev/null +++ b/data/occurrences/12_01_23-DJI_0987.csv @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cbcf7f65fc5aff7f098e319a134450be27a3ed0555f9bd9709c39c92896b7a05 +size 11339992 diff --git a/data/occurrences/12_01_23-DJI_0988.csv b/data/occurrences/12_01_23-DJI_0988.csv new file mode 100644 index 0000000000000000000000000000000000000000..1ae7e674c1f54cafa570d975395a0fe6db8a627f --- /dev/null +++ b/data/occurrences/12_01_23-DJI_0988.csv @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bf9047be691bd6bd5b4b5d44a0da450b6dd7a3830d0bd44d081e044c74cd56d7 +size 15173906 diff --git a/data/occurrences/12_01_23-DJI_0989.csv b/data/occurrences/12_01_23-DJI_0989.csv new file mode 100644 index 0000000000000000000000000000000000000000..f2f77b4513fa9a2b6089d026e47d710d6ec1ff93 --- /dev/null +++ b/data/occurrences/12_01_23-DJI_0989.csv @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b18174dbd8f092e650ff1a8e0a030e859cda0d306738ddad86d581a1aa555dfb +size 3635599 diff --git a/data/occurrences/12_01_23-DJI_0992.csv b/data/occurrences/12_01_23-DJI_0992.csv new file mode 100644 index 0000000000000000000000000000000000000000..57079ed16adb889b023b44d2de827f2cc3bba862 --- /dev/null +++ b/data/occurrences/12_01_23-DJI_0992.csv @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0d2ee072132b6da70013ff058c4008eafa2dc91f75d83c880eb86a9827c3053c +size 4940182 diff --git a/data/occurrences/12_01_23-DJI_0994.csv b/data/occurrences/12_01_23-DJI_0994.csv new file mode 100644 index 
0000000000000000000000000000000000000000..80ecbcb407aa6f393efeb4885907dc8c9c84a645 --- /dev/null +++ b/data/occurrences/12_01_23-DJI_0994.csv @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:feff00240d115a46be0513177508aa462740c86c838d2d69ce73a2390c5bff50 +size 3355881 diff --git a/data/occurrences/12_01_23-DJI_0997.csv b/data/occurrences/12_01_23-DJI_0997.csv new file mode 100644 index 0000000000000000000000000000000000000000..35d987ea1a6999f576f92765aa3c2aefc98ec2ac --- /dev/null +++ b/data/occurrences/12_01_23-DJI_0997.csv @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:36cf5cfc61e879023b4b72a114b0ba780f29e8976f355a1bfe4cecb3409403ab +size 24568166 diff --git a/data/occurrences/12_01_23-DJI_0998.csv b/data/occurrences/12_01_23-DJI_0998.csv new file mode 100644 index 0000000000000000000000000000000000000000..5518acedc913a188d0729ecf4876661fce0308ac --- /dev/null +++ b/data/occurrences/12_01_23-DJI_0998.csv @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:71f72036b13beeaa38f914eaa87271c31f9895c877291b0569cadb0039376cbd +size 4709447 diff --git a/data/occurrences/13_01_23-DJI_0009.csv b/data/occurrences/13_01_23-DJI_0009.csv new file mode 100644 index 0000000000000000000000000000000000000000..a71adee18194cde810e3242dd091dba3d68a274a --- /dev/null +++ b/data/occurrences/13_01_23-DJI_0009.csv @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b8be9eadb5f7261283e20e39eb4ab9784d5aab6eff38cbaf8c599dccb7d75612 +size 885424 diff --git a/data/occurrences/13_01_23-DJI_0011.csv b/data/occurrences/13_01_23-DJI_0011.csv new file mode 100644 index 0000000000000000000000000000000000000000..9b17c7fa3525cd399985bd98b2544cb7aa6eec43 --- /dev/null +++ b/data/occurrences/13_01_23-DJI_0011.csv @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:11c5a419893474e8367afd0621a3bd3d8f609111dc0268579d73c0f2f753bbe3 +size 8603958 diff --git a/data/occurrences/13_01_23-DJI_0012.csv 
b/data/occurrences/13_01_23-DJI_0012.csv new file mode 100644 index 0000000000000000000000000000000000000000..7749df5d7ea91ba676a37859208524854e7b5d04 --- /dev/null +++ b/data/occurrences/13_01_23-DJI_0012.csv @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4324d88a6e239f9d697665caabbe709b86e0e58bdf08066795480f23e241eff0 +size 4987483 diff --git a/data/occurrences/13_01_23-DJI_0013.csv b/data/occurrences/13_01_23-DJI_0013.csv new file mode 100644 index 0000000000000000000000000000000000000000..f4fd5e6cdab35aa52392c936d9d42d50e61dc3be --- /dev/null +++ b/data/occurrences/13_01_23-DJI_0013.csv @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1da985cabc3df39703a7163a7bad6c0d981e0cead7d047944e1a533f69a8a142 +size 2605445 diff --git a/data/occurrences/13_01_23-DJI_0015.csv b/data/occurrences/13_01_23-DJI_0015.csv new file mode 100644 index 0000000000000000000000000000000000000000..42c120fc4d7c2d640e973aef8be9cbd70bc4a8be --- /dev/null +++ b/data/occurrences/13_01_23-DJI_0015.csv @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:83eccb91f1099871d449bbaf494865dc5346a3cfeca66ef9c342ec6b195960f4 +size 724085 diff --git a/data/occurrences/13_01_23-DJI_0016.csv b/data/occurrences/13_01_23-DJI_0016.csv new file mode 100644 index 0000000000000000000000000000000000000000..f0d29abaa093ae50ad8f625065c93318a58b9aa5 --- /dev/null +++ b/data/occurrences/13_01_23-DJI_0016.csv @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:da174aed2695485960517dbb90fce5b66c21a3568df64ef94958229d4a5bee06 +size 1101965 diff --git a/data/occurrences/13_01_23-DJI_0018.csv b/data/occurrences/13_01_23-DJI_0018.csv new file mode 100644 index 0000000000000000000000000000000000000000..af0d27b8460efc1caa0e74812107c1a05e4f3919 --- /dev/null +++ b/data/occurrences/13_01_23-DJI_0018.csv @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ee1ec8c76429905ad91cca2c78f1594f3296ded5b668f5be942da35d8c5a67c3 
+size 3502983 diff --git a/data/occurrences/13_01_23-DJI_0019.csv b/data/occurrences/13_01_23-DJI_0019.csv new file mode 100644 index 0000000000000000000000000000000000000000..eefd8a79808a460bf571b53ecd1f67626fb741a3 --- /dev/null +++ b/data/occurrences/13_01_23-DJI_0019.csv @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5e8412006abf0c006a8aa7363ff910b2792edd4b2921a9e154c91e6af3ae5c5e +size 21287954 diff --git a/data/occurrences/13_01_23-DJI_0020.csv b/data/occurrences/13_01_23-DJI_0020.csv new file mode 100644 index 0000000000000000000000000000000000000000..7344ff62d9998bf5e6b1f95855677f98ef1a7c5e --- /dev/null +++ b/data/occurrences/13_01_23-DJI_0020.csv @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e221f97e792e9969eae4b24b061d6bcc2c91a60b95f6e0c928afff8ec6b8bf1c +size 36827652 diff --git a/data/occurrences/13_01_23-DJI_0021.csv b/data/occurrences/13_01_23-DJI_0021.csv new file mode 100644 index 0000000000000000000000000000000000000000..5e5c76e1efe5c48f4222a9266f99f0992001c9a4 --- /dev/null +++ b/data/occurrences/13_01_23-DJI_0021.csv @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6edb66a9d153f7042744bc11c72183011426a7e23e9133547cd5eab2072ccbb8 +size 2885156 diff --git a/data/occurrences/13_01_23-DJI_0022.csv b/data/occurrences/13_01_23-DJI_0022.csv new file mode 100644 index 0000000000000000000000000000000000000000..bbc46b858bb63ee565eb5cc057762746cfc7d1a4 --- /dev/null +++ b/data/occurrences/13_01_23-DJI_0022.csv @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c089d5fef1035693d88210a7fa2d797b845849a2239faf6ed6e1124f9259e84b +size 13267907 diff --git a/data/occurrences/13_01_23-DJI_0023.csv b/data/occurrences/13_01_23-DJI_0023.csv new file mode 100644 index 0000000000000000000000000000000000000000..60b9035692e323dc4e9e204e79c3ed1a67ca72da --- /dev/null +++ b/data/occurrences/13_01_23-DJI_0023.csv @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:54ded963edf1eaeb70e8bc753351b438a673e01feba1b77d81e17f9bfa4a91d7 +size 21795825 diff --git a/data/occurrences/13_01_23-DJI_0024.csv b/data/occurrences/13_01_23-DJI_0024.csv new file mode 100644 index 0000000000000000000000000000000000000000..b7cff602f619dd1b6f771d0ff7c25a1227ec503c --- /dev/null +++ b/data/occurrences/13_01_23-DJI_0024.csv @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1516741845d4a926a920159878f298881ebb75ff4e83359bb776cbe2690d19a7 +size 20764115 diff --git a/data/occurrences/13_01_23-DJI_0029.csv b/data/occurrences/13_01_23-DJI_0029.csv new file mode 100644 index 0000000000000000000000000000000000000000..19b6d9b160e667c8fcc36dec3f7cc2f8ac0e9a90 --- /dev/null +++ b/data/occurrences/13_01_23-DJI_0029.csv @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:76db9f9a0b32de8496616a2bfb4874d690c43b00128927706802d94eb016d65d +size 4526391 diff --git a/data/occurrences/13_01_23-DJI_0031.csv b/data/occurrences/13_01_23-DJI_0031.csv new file mode 100644 index 0000000000000000000000000000000000000000..0fd863e6dee9c1e1926e6571cefa3dcf6219ee38 --- /dev/null +++ b/data/occurrences/13_01_23-DJI_0031.csv @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dc706e8dcba3caea6aa5f7c23b4bdd3d4d693841b8978299f6922befc7b70b95 +size 7834907 diff --git a/data/occurrences/13_01_23-DJI_0032.csv b/data/occurrences/13_01_23-DJI_0032.csv new file mode 100644 index 0000000000000000000000000000000000000000..1b6a1b965a77a9c3a69fb405341b32985fad829a --- /dev/null +++ b/data/occurrences/13_01_23-DJI_0032.csv @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:50c4a57bbb42815cf6286454ffddf4c19cffcf8d9addae4973c4f646f5da762f +size 24098088 diff --git a/data/occurrences/13_01_23-DJI_0033.csv b/data/occurrences/13_01_23-DJI_0033.csv new file mode 100644 index 0000000000000000000000000000000000000000..f85fb5d7685c874d5aa3d9950611ca0671aeff0b --- /dev/null +++ 
b/data/occurrences/13_01_23-DJI_0033.csv @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6e1a024fbb641d4d33c3d99202f0bf17fa505628d23ff222f5a42b31f619c944 +size 28102013 diff --git a/data/occurrences/13_01_23-DJI_0035.csv b/data/occurrences/13_01_23-DJI_0035.csv new file mode 100644 index 0000000000000000000000000000000000000000..a95bc0199d850d14234b6c4bd8a3ea4c8d3305fe --- /dev/null +++ b/data/occurrences/13_01_23-DJI_0035.csv @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:aaa12b6ea3f628d4b7d64ea00df020dab7096f99285ed23c340c9eb9f19583b2 +size 1195985 diff --git a/data/occurrences/13_01_23-DJI_0036.csv b/data/occurrences/13_01_23-DJI_0036.csv new file mode 100644 index 0000000000000000000000000000000000000000..e5117d964d77243341ea0cb52bd03fe8107961e1 --- /dev/null +++ b/data/occurrences/13_01_23-DJI_0036.csv @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:19d06fe004cd653358b2fd45a7729581e2e6d5379da0033175194f1a20eba1ca +size 7280882 diff --git a/data/occurrences/13_01_23-DJI_0037.csv b/data/occurrences/13_01_23-DJI_0037.csv new file mode 100644 index 0000000000000000000000000000000000000000..a81009b85135f59e8b6db52d1bfcf8936279927d --- /dev/null +++ b/data/occurrences/13_01_23-DJI_0037.csv @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:50e6e6bb98c69f6a8d40a3a8bd5a6d69f73ce2173116776fb64dc0331c197456 +size 28862771 diff --git a/data/occurrences/13_01_23-DJI_0038.csv b/data/occurrences/13_01_23-DJI_0038.csv new file mode 100644 index 0000000000000000000000000000000000000000..2dd51fb113533833fc96dfdf42b9ba34acb97030 --- /dev/null +++ b/data/occurrences/13_01_23-DJI_0038.csv @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:22593b4b450125cf5f380dd8978493394c1ebfb0d534a52729ee0e107e6e2cea +size 6426417 diff --git a/data/occurrences/13_01_23-DJI_0039.csv b/data/occurrences/13_01_23-DJI_0039.csv new file mode 100644 index 
0000000000000000000000000000000000000000..172523a1a1721d961b77f5b45447bcd5fdbadd89 --- /dev/null +++ b/data/occurrences/13_01_23-DJI_0039.csv @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c2f6b7aaec38b4b17343bcdf40a79f27165f66f6c21b3dd5a7f7a74aaefb1534 +size 17464554 diff --git a/data/occurrences/13_01_23-DJI_0040.csv b/data/occurrences/13_01_23-DJI_0040.csv new file mode 100644 index 0000000000000000000000000000000000000000..3622df62f6459c96eb80ef4603841ad2d695727a --- /dev/null +++ b/data/occurrences/13_01_23-DJI_0040.csv @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ea2b254059a04308ca48ec3785e314bea344916ebd07f8c67ae42fe4b3fd3137 +size 73829509 diff --git a/data/occurrences/13_01_23-DJI_0041.csv b/data/occurrences/13_01_23-DJI_0041.csv new file mode 100644 index 0000000000000000000000000000000000000000..1a45942a9a425c47edd46f48a6f480a42dea4d47 --- /dev/null +++ b/data/occurrences/13_01_23-DJI_0041.csv @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5f91e0b3e92eaff10d209a5ff8015a7b1c83732cbc977d188901e49c3a1383e7 +size 37163078 diff --git a/data/occurrences/13_01_23-DJI_0042.csv b/data/occurrences/13_01_23-DJI_0042.csv new file mode 100644 index 0000000000000000000000000000000000000000..a8e0e08fd4ce9c140de4e672783f8508fe55614b --- /dev/null +++ b/data/occurrences/13_01_23-DJI_0042.csv @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b8100a3f59c6caa6d27fb0e7f2aef33c855b261c15275adfe5725073d8edc8c5 +size 131545325 diff --git a/data/occurrences/13_01_23-DJI_0043.csv b/data/occurrences/13_01_23-DJI_0043.csv new file mode 100644 index 0000000000000000000000000000000000000000..2a898f4d70b7dab8b9128ed2cfca0c6a0032bb4d --- /dev/null +++ b/data/occurrences/13_01_23-DJI_0043.csv @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8306170cb37736ab0714faca6a810e085b6476e718e4b2b03b0f106c5c6745e5 +size 4416255 diff --git 
a/data/occurrences/16_01_23_flight_1-DJI_0001.csv b/data/occurrences/16_01_23_flight_1-DJI_0001.csv new file mode 100644 index 0000000000000000000000000000000000000000..a9b1d9ac14feb6f9ca1027d4282c0b648e344607 --- /dev/null +++ b/data/occurrences/16_01_23_flight_1-DJI_0001.csv @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a68315f8cd40869e70a0a98138b08da2f5cc68df773feb6504780f4f3cefbbb2 +size 25789369 diff --git a/data/occurrences/16_01_23_flight_1-DJI_0002.csv b/data/occurrences/16_01_23_flight_1-DJI_0002.csv new file mode 100644 index 0000000000000000000000000000000000000000..54f48c7e6dfd29f338998fc067c5d071433d9dfd --- /dev/null +++ b/data/occurrences/16_01_23_flight_1-DJI_0002.csv @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2c9e0b2aab6d9e4db5c34c844f3e48b4c68639b0727478d14ce573c40e0cf6ad +size 90351965 diff --git a/data/occurrences/16_01_23_flight_1-DJI_0003.csv b/data/occurrences/16_01_23_flight_1-DJI_0003.csv new file mode 100644 index 0000000000000000000000000000000000000000..4abaa07e973b31f30de946afa327228e366e13a5 --- /dev/null +++ b/data/occurrences/16_01_23_flight_1-DJI_0003.csv @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b9f81e6faa82153c3a3716a56c4495a5683d1d245446895703bea52808b8ebf7 +size 98499971 diff --git a/data/occurrences/16_01_23_flight_2-DJI_0001.csv b/data/occurrences/16_01_23_flight_2-DJI_0001.csv new file mode 100644 index 0000000000000000000000000000000000000000..8c4a45867db8d0bbaf64b9eda104466287be8f7a --- /dev/null +++ b/data/occurrences/16_01_23_flight_2-DJI_0001.csv @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7e836ca3ffdc2da432bfc2c28807e5ac3d4f2fc6204e5a33e77f8fa0b62e2d48 +size 23856551 diff --git a/data/occurrences/16_01_23_flight_2-DJI_0002.csv b/data/occurrences/16_01_23_flight_2-DJI_0002.csv new file mode 100644 index 0000000000000000000000000000000000000000..013a789877d53d6beb9940fe372a24f5e84068bc --- /dev/null +++ 
b/data/occurrences/16_01_23_flight_2-DJI_0002.csv @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2c8846aead3c54aabd8753ad56bc8bb0ffecbd80ff18f78d10d418e39bf9cc15 +size 64329673 diff --git a/data/occurrences/16_01_23_flight_2-DJI_0003.csv b/data/occurrences/16_01_23_flight_2-DJI_0003.csv new file mode 100644 index 0000000000000000000000000000000000000000..1a4c3df20b57dfac94bec4e19ced2017b5dbc383 --- /dev/null +++ b/data/occurrences/16_01_23_flight_2-DJI_0003.csv @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7b325a42653886f4ab23c64b9c8af1bea09dd88ff04ff963315b00c91e6f6e47 +size 85046093 diff --git a/data/occurrences/16_01_23_flight_2-DJI_0004.csv b/data/occurrences/16_01_23_flight_2-DJI_0004.csv new file mode 100644 index 0000000000000000000000000000000000000000..cd5bd9ad610a5ebc51c1aa69d2acb95bdee674ae --- /dev/null +++ b/data/occurrences/16_01_23_flight_2-DJI_0004.csv @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c8f873e5f110ef4f480fc7d6bfbcf141926e003a9ff3a46b2d4709e2c58868e9 +size 53917955 diff --git a/data/occurrences/17_01_2023_session_1-DJI_0006.csv b/data/occurrences/17_01_2023_session_1-DJI_0006.csv new file mode 100644 index 0000000000000000000000000000000000000000..a5858b4e943a07cbd416f142fe1e1f0d21ad9d79 --- /dev/null +++ b/data/occurrences/17_01_2023_session_1-DJI_0006.csv @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:06625afa027c79c937de33cb67f1c34212e92470b748845a99b788789d293b3c +size 14238708 diff --git a/data/occurrences/17_01_2023_session_1-DJI_0007.csv b/data/occurrences/17_01_2023_session_1-DJI_0007.csv new file mode 100644 index 0000000000000000000000000000000000000000..a297d10f9a34df3ce986fbeaaf3d87f5686ff9c1 --- /dev/null +++ b/data/occurrences/17_01_2023_session_1-DJI_0007.csv @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c586d1bbe51de3d448934258f2205c1dc0b656ff30b26fbc7cd716aca99cf007 +size 16140825 diff --git 
a/data/occurrences/17_01_2023_session_2-DJI_0008.csv b/data/occurrences/17_01_2023_session_2-DJI_0008.csv new file mode 100644 index 0000000000000000000000000000000000000000..66050b2877c6ddc8cb62f02e99428aa2eb1f3d6d --- /dev/null +++ b/data/occurrences/17_01_2023_session_2-DJI_0008.csv @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:07042f1cce31321e4d45da5a70288830dbdf6d28671da17b7cc41f324b3beaa7 +size 2633162 diff --git a/data/occurrences/17_01_2023_session_2-DJI_0010.csv b/data/occurrences/17_01_2023_session_2-DJI_0010.csv new file mode 100644 index 0000000000000000000000000000000000000000..323663b8b2ccf9255215e5d85eb969d25d0ee5a4 --- /dev/null +++ b/data/occurrences/17_01_2023_session_2-DJI_0010.csv @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4b713bb0d490cb7eeecffaedca935ed4b82ed6293d6912168a0c26914f2dcc18 +size 30948909 diff --git a/data/occurrences/17_01_2023_session_2-DJI_0011.csv b/data/occurrences/17_01_2023_session_2-DJI_0011.csv new file mode 100644 index 0000000000000000000000000000000000000000..e4a4573a053598923a31bc66e6394c4f606ab90c --- /dev/null +++ b/data/occurrences/17_01_2023_session_2-DJI_0011.csv @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d554a6a74001d272aa56953333a5b592c37dcf416ee81d8201c36cb05512aa4f +size 30877129 diff --git a/data/occurrences/17_01_2023_session_2-DJI_0012.csv b/data/occurrences/17_01_2023_session_2-DJI_0012.csv new file mode 100644 index 0000000000000000000000000000000000000000..ac462db0ecbe570be3df3cfa8d5c9a96046987f1 --- /dev/null +++ b/data/occurrences/17_01_2023_session_2-DJI_0012.csv @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3c77e0793b2af2598b894b4fb8c4347dfa155b3624f55a0fa353f8ba76681910 +size 30969780 diff --git a/data/occurrences/17_01_2023_session_2-DJI_0013.csv b/data/occurrences/17_01_2023_session_2-DJI_0013.csv new file mode 100644 index 
0000000000000000000000000000000000000000..b0de72fc15382fe8558c151037e63d61c7df8587 --- /dev/null +++ b/data/occurrences/17_01_2023_session_2-DJI_0013.csv @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e7e2f18c4ecf41b9d78419003846d9908a58bbc8bef14fffd538ee4b7cbee297 +size 23657660 diff --git a/data/session_events.csv b/data/session_events.csv index a2d21a7de1f9771f08698626e5d80217a007c850..e91e405c11d84f1ca2ba0f1f07457a46d59450c4 100644 --- a/data/session_events.csv +++ b/data/session_events.csv @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:86cf41400ed25b3d655d4072fd5c3c2e9ca016d349d0d40bc10a80fbfc9d3a3a -size 19731 +oid sha256:413b6f40f961211e3becd6b34f5feb9850cf91ee4c9644fc52569ea5f36da65d +size 22306 diff --git a/data/video_events.csv b/data/video_events.csv index efc349b35a159a045833d8f4cf99d48cec56ff0e..f0b429b6d58b8dd9edb47d0866c4a754f76989ae 100644 --- a/data/video_events.csv +++ b/data/video_events.csv @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0242eaf58c4fb0cff768e1d1b06c2ba3cba9fef0d2a23018032e3a52e1cf3585 -size 12616 +oid sha256:6a07a6876b5974f9c9e4973fb1ccc112b42c2f049667acc246597c6ecada28cf +size 88171 diff --git a/metadata/DATA_DICTIONARY.md b/metadata/DATA_DICTIONARY.md new file mode 100644 index 0000000000000000000000000000000000000000..5ed20566583d2b632037227506a73a5f6d26ff88 --- /dev/null +++ b/metadata/DATA_DICTIONARY.md @@ -0,0 +1,382 @@ +# KABR Behavior Telemetry Data Dictionary + +This document provides detailed descriptions of all data files and their fields in the KABR Behavior Telemetry dataset. + +## Dataset Files + +### Core Data Files + +#### `data/occurrences/` +Frame-by-frame occurrence records for each video, combining detection tracks, behavior annotations, and telemetry data. 
+ +**Files:** One CSV per video, named `{date}-{video_id}.csv` (e.g., `11_01_23-DJI_0977.csv`) + +**Key Fields:** +- `date`: Recording date in DD_MM_YY format +- `video_id`: DJI video identifier (e.g., DJI_0977) +- `frame`: Frame number in the video sequence (0-indexed) +- `date_time`: Timestamp in format "YYYY-MM-DD HH:MM:SS,milliseconds,microseconds" +- `id`: Mini-scene identifier (track ID for behavioral sequences) +- `latitude`: GPS latitude in decimal degrees (WGS84) +- `longitude`: GPS longitude in decimal degrees (WGS84) +- `altitude`: Altitude in meters above sea level +- `iso`: Camera ISO setting +- `shutter`: Shutter speed value +- `fnum`: Aperture f-number +- `ev`: Exposure value +- `ct`: Color temperature +- `color_md`: Color mode descriptor +- `focal_len`: Lens focal length in millimeters +- `dzoom_ratio`: Digital zoom ratio +- `xtl`, `ytl`, `xbr`, `ybr`: Bounding box coordinates (top-left and bottom-right) +- `z_order_x`, `z_order_y`: Display depth ordering for overlapping annotations +- `label`: Object class label (e.g., "zebra", "giraffe") +- `source`: Annotation source +- `keyframe_x`, `keyframe_y`: Whether frame is a tracking keyframe +- `outside_x`, `outside_y`: Whether object is outside frame bounds +- `occluded_x`, `occluded_y`: Whether object is occluded +- `points`: Polygon points for behavioral sequences +- `behaviour`: Behavioral classification (e.g., "walking", "grazing", "running") + +**Record Count:** Varies by video; typically 10,000-66,000 frames per video +**Coverage:** 47 videos with complete data +**Missing Values:** Frames without detections/annotations have null values for annotation fields + +--- + +#### `data/video_events.csv` +Darwin Core Event records for individual video recordings. 
+ +**Record Count:** 68 videos (47 with occurrence data) + +**Fields:** + +**Identifiers:** +- `eventID`: Unique event identifier (format: "KABR-2023:{session}:{video_id}") +- `parentEventID`: Links to session_events (format: "KABR-2023:{session}") + +**Event Classification:** +- `eventType`: Type of sampling event ("video recording") +- `eventDate`: Recording date (ISO 8601: YYYY-MM-DD) +- `eventTime`: Video start time (HH:MM:SS) +- `endTime`: Video end time (HH:MM:SS) + +**Geographic Information:** +- `decimalLatitude`: Launch point latitude in decimal degrees (WGS84) +- `decimalLongitude`: Launch point longitude in decimal degrees (WGS84) +- `geodeticDatum`: Coordinate reference system ("WGS84") +- `minimumElevationInMeters`: Minimum altitude during video +- `maximumElevationInMeters`: Maximum altitude during video +- `footprintWKT`: Geographic bounding box in Well-Known Text format + +**Sampling Protocol:** +- `samplingProtocol`: "Continuous aerial video recording" + +**Associated Resources:** +- `associatedMedia`: JSON object with paths to detection and behavior annotation files + ```json + { + "detection": "path/to/DJI_XXXX_tracks.xml", + "behavior": ["path/to/trackID.xml", ...] + } + ``` + +**Remarks:** +- `eventRemarks`: Description of video file (e.g., "Video file DJI_0977.MP4") + +--- + +#### `data/session_events.csv` +Darwin Core Event records for field sessions (missions/flights). 
+ +**Record Count:** 17 sessions (14 with occurrence data) + +**Fields:** + +**Identifiers and Classification:** +- `eventID`: Unique session identifier (format: "KABR-2023:{date}_{session}") +- `parentEventID`: Dataset identifier ("KABR-2023") +- `eventType`: Type of event ("drone survey") + +**Temporal Coverage:** +- `eventDate`: Session date (ISO 8601: YYYY-MM-DD) +- `eventTime`: Session start time (HH:MM:SS) +- `endTime`: Session end time (HH:MM:SS) +- `year`, `month`, `day`: Parsed date components + +**Geographic Coverage:** +- `launchLatitude`: Drone launch point latitude (from first video) +- `launchLongitude`: Drone launch point longitude (from first video) +- `decimalLatitude`: Latitude range across session "[min, max]" +- `decimalLongitude`: Longitude range across session "[min, max]" +- `geodeticDatum`: "WGS84" +- `coordinateUncertaintyInMeters`: GPS precision +- `minimumElevationInMeters`: Minimum altitude across all videos +- `maximumElevationInMeters`: Maximum altitude across all videos +- `footprintWKT`: Session bounding box in WKT format +- `locationID`: "MPALA-KENYA" +- `locality`: "Mpala Research Centre" +- `country`: "Kenya" +- `countryCode`: "KE" + +**Habitat and Environment:** +- `habitat`: Habitat type description (e.g., "Open grassy habitat with some scattered bushes") + +**Sampling Details:** +- `samplingProtocol`: Detailed protocol description +- `sampleSizeValue`: Empty (intended for area surveyed; note that `sampleSizeUnit` is "minutes", a duration unit — the two fields are currently inconsistent) +- `sampleSizeUnit`: "minutes" +- `samplingEffort`: Number and description of video segments + +**Taxonomic Information:** +- `organismQuantity`: Number of individual animals observed +- `organismQuantityType`: "herd size" + +**Observations:** +- `eventRemarks`: Field notes and observations +- `_species_common`: Common name(s) of observed species +- `_species_commonNames`: Additional common names +- `_n_individuals`: Number of individuals +- `_n_videos`: Number of videos in session + +**Associated Data:** +- `associatedMedia`: JSON object 
listing videos, focal follows, and scan samples + ```json + { + "video": ["DJI_0977", "DJI_0978", ...], + "focal": [], + "scan": [] + } + ``` +- `associatedReferences`: Airdata telemetry file names +- `_telemetry_file_raw`: Raw telemetry file status notes + +**Ecological Extensions (Humboldt Eco):** +- `eco:inventoryTypes`: "restrictedSearch" +- `eco:protocolNames`: "KABR Drone Video Survey Protocol" +- `eco:protocolDescriptions`: Protocol description +- `eco:protocolReferences`: "https://doi.org/10.48550/arXiv.2510.02030" +- `eco:isAbundanceReported`: True +- `eco:isAbundanceCapReported`: False +- `eco:abundanceUnit`: "individuals" +- `eco:isVegetationCoverReported`: True/False +- `eco:vegetationCoverUnit`: "Bitterlich score (0-10 scale)" (when applicable) +- `eco:isTaxonCompletenessReported`: True +- `eco:taxonCompletenessProtocols`: "All visible target taxa annotated in video frames with bounding boxes" +- `eco:isAbsenceReported`: False +- `eco:hasNonTargetTaxa`: Empty +- `eco:nonTargetTaxa`: Empty +- `eco:targetTaxonomicScope`: Scientific names of target species +- `eco:excludedTaxonomicScope`: Empty +- `eco:samplingPerformedBy`: Empty +- `eco:siteCount`: 1 +- `eco:siteNestingDescription`: "Single focal group follow per session" +- `eco:verbatimSiteDescriptions`: Empty + +**Quality Flags:** +- `_needs_verification`: Data quality flag + +--- + +### Scripts + +#### `scripts/merge_behavior_telemetry.py` +Merges SRT metadata, detection tracks, and behavior annotations into occurrence files. 
+ +**Usage:** +```bash +python scripts/merge_behavior_telemetry.py \ + --data_path /path/to/video/directories \ + --outpath /path/to/output/ \ + [--skip-airdata] +``` + +**Arguments:** +- `--data_path`: Directory containing video folders (format: DATE-VIDEO_ID) +- `--session_data_path`: Path to SRT files (default: preset path) +- `--flight_logs_path`: Path to decrypted flight logs (default: preset path) +- `--skip-airdata`: Skip merging with flight log data +- `--write`: Whether to write output (default: True) +- `--outpath`: Output directory for CSV files + +**Input Requirements:** +- Video directories with structure: + - `metadata/{video_id}_tracks.xml`: Detection bounding boxes + - `actions/*.xml`: Behavior annotation files +- SRT files with GPS and camera metadata +- (Optional) Flight log CSV files with telemetry + +**Output:** +- One CSV per video in occurrence format + +--- + +#### `scripts/update_video_events.py` +Updates video_events.csv with associatedMedia paths to detection and behavior files. + +**Usage:** +```bash +python scripts/update_video_events.py \ + --video_events data/video_events.csv \ + --data_path /path/to/video/directories \ + [--output output_path.csv] +``` + +**What it does:** +- Scans video directories for detection and behavior annotation files +- Creates relative paths from kabr-behavior-telemetry/data to annotation files +- Updates associatedMedia field with JSON structure + +--- + +#### `scripts/add_event_times.py` +Extracts start and end times from occurrence files and adds to video_events.csv. 
+ +**Usage:** +```bash +python scripts/add_event_times.py \ + --video_events data/video_events.csv \ + --occurrences data/occurrences/ \ + [--output output_path.csv] +``` + +**What it does:** +- Reads date_time from first and last rows of each occurrence file +- Extracts time component (HH:MM:SS) +- Updates eventTime and endTime fields + +--- + +#### `scripts/add_gps_data.py` +Extracts GPS statistics from occurrence files and adds to event files. + +**Usage:** +```bash +python scripts/add_gps_data.py \ + --video_events data/video_events.csv \ + --session_events data/session_events.csv \ + --occurrences data/occurrences/ \ + [--output_video output_video.csv] \ + [--output_session output_session.csv] +``` + +**What it does:** + +For **video_events.csv**: +- Extracts launch point (first GPS coordinate) +- Calculates min/max lat/lon bounds +- Determines altitude range +- Creates WKT footprint + +For **session_events.csv**: +- Uses first video's launch point as session launch +- Aggregates bounds across all videos in session +- Formats lat/lon as "[min, max]" ranges +- Creates session-level WKT footprint + +--- + +## Data Relationships + +### Hierarchical Structure +``` +KABR-2023 (Dataset) +├── KABR-2023:11_01_23_session_1 (Session) +│ ├── KABR-2023:11_01_23_session_1:DJI_0488 (Video Event) +│ │ └── occurrences/11_01_23-DJI_0488.csv (Frame records) +│ └── ... +├── KABR-2023:11_01_23_session_2 (Session) +│ ├── KABR-2023:11_01_23_session_2:DJI_0977 (Video Event) +│ │ └── occurrences/11_01_23-DJI_0977.csv (Frame records) +│ └── ... +... +``` + +### Linkages +- **session_events** ← linked by parentEventID ← **video_events** +- **video_events** ← associatedMedia paths ← **annotation files** +- **video_events** → referenced by occurrence files → **occurrences/** + +--- + +## Data Processing Pipeline + +1. **Field Collection**: Drone flights with video recording +2. **Video Processing**: Frame extraction and annotation +3. 
**Telemetry Extraction**: SRT files parsed for GPS and camera metadata +4. **Annotation**: Detection boxes and behavior labels added +5. **Merging** (`merge_behavior_telemetry.py`): Combine all data sources +6. **Event Creation**: Generate Darwin Core event records +7. **Metadata Enhancement**: + - Add associatedMedia paths (`update_video_events.py`) + - Add temporal bounds (`add_event_times.py`) + - Add GPS statistics (`add_gps_data.py`) + +--- + +## Darwin Core Compliance + +This dataset follows Darwin Core standards (TDWG) with extensions for ecological inventory data (Humboldt Eco). + +**Core Classes:** +- **Event**: Recording sessions and individual videos +- **Occurrence**: Frame-level animal detections (in occurrence files) + +**Key Standards:** +- Temporal data in ISO 8601 format +- Coordinates in WGS84 decimal degrees +- Controlled vocabularies for taxonomic names +- Unique identifiers for all records + +--- + +## File Formats + +### CSV Files +- **Encoding**: UTF-8 +- **Delimiter**: Comma (`,`) +- **Quoting**: Double quotes for fields containing delimiters +- **Missing Values**: Empty strings or `NaN` + +### JSON Fields +- **Structure**: Valid JSON objects in CSV fields +- **Encoding**: Double-quotes escaped as `""` +- **Arrays**: Square bracket notation `["item1", "item2"]` + +### WKT (Well-Known Text) +- **Format**: `POLYGON((lon1 lat1, lon2 lat2, ...))` +- **Coordinate Order**: Longitude first, then latitude +- **Datum**: WGS84 (EPSG:4326) + +--- + +## Quality Assurance + +### Known Limitations +- **Missing Occurrence Data**: 21 videos lack occurrence files due to: + - Missing SRT files + - Corrupted source data + - Processing errors + - Empty annotations + +- **GPS Accuracy**: ±5-10 meters typical +- **Timestamp Precision**: Millisecond resolution but may have ±1 second drift + +### Data Quality Flags +- `_needs_verification`: Manual review recommended +- `_telemetry_file_raw`: Notes on telemetry file status + +--- + +## Version History + +- **v1.1** 
(2026-01-02): Added GPS data, event times, and occurrence files +- **v1.0** (2024-12-31): Initial release with session and video events + +--- + +## Contact + +For questions about this data: +- **Email**: kline.377@osu.edu +- **Issues**: [GitHub repository](https://github.com/Imageomics/kabr-behavior-telemetry) diff --git a/scripts/add_event_times.py b/scripts/add_event_times.py new file mode 100644 index 0000000000000000000000000000000000000000..4d7e6a6ad33cafe0cdd84cbc045ebe976d38cf54 --- /dev/null +++ b/scripts/add_event_times.py @@ -0,0 +1,119 @@ +import pandas as pd +import os +from datetime import datetime + +def add_event_times( + video_events_path, + occurrences_path, + output_path=None +): + """ + Update video_events.csv with eventTime and endTime from occurrence files. + + Args: + video_events_path: Path to video_events.csv + occurrences_path: Path to occurrences directory + output_path: Path to write updated CSV (if None, overwrites input) + """ + # Read video_events.csv + df = pd.read_csv(video_events_path) + + # Parse the eventID to extract video_id + for idx, row in df.iterrows(): + event_id = row['eventID'] + parts = event_id.split(':') + + if len(parts) < 3: + print(f"Warning: Could not parse eventID: {event_id}") + continue + + date_session = parts[1] + video_id = parts[2] + + # Extract the date portion (without session) + date_parts = date_session.split('_session_') + if len(date_parts) > 1: + date_part = date_parts[0] + else: + date_part = date_session + + # Construct the occurrence filename + occurrence_file = f"{date_part}-{video_id}.csv" + occurrence_path = os.path.join(occurrences_path, occurrence_file) + + if not os.path.exists(occurrence_path): + print(f"⚠ {video_id}: No occurrence file found") + continue + + try: + # Read the occurrence file + occ_df = pd.read_csv(occurrence_path) + + if 'date_time' not in occ_df.columns or occ_df.empty: + print(f"⚠ {video_id}: No date_time data") + continue + + # Get first and last non-null date_time 
values + date_times = occ_df['date_time'].dropna() + + if date_times.empty: + print(f"⚠ {video_id}: All date_time values are null") + continue + + # Extract the first and last timestamps + # Format: "2023-01-11 16:04:03,114,286" + first_dt_str = str(date_times.iloc[0]) + last_dt_str = str(date_times.iloc[-1]) + + # Parse to extract just the time portion (HH:MM:SS) + first_time = first_dt_str.split(',')[0].split(' ')[1] if ' ' in first_dt_str else None + last_time = last_dt_str.split(',')[0].split(' ')[1] if ' ' in last_dt_str else None + + if first_time and last_time: + # Update the dataframe + df.at[idx, 'eventTime'] = first_time + df.at[idx, 'endTime'] = last_time + print(f"✓ {video_id}: {first_time} - {last_time}") + else: + print(f"⚠ {video_id}: Could not parse time") + + except Exception as e: + print(f"✗ {video_id}: Error - {str(e)}") + + # Write the updated CSV + if output_path is None: + output_path = video_events_path + + df.to_csv(output_path, index=False) + print(f"\nUpdated video_events.csv written to: {output_path}") + +if __name__ == "__main__": + import argparse + + parser = argparse.ArgumentParser(description="Add event times to video_events.csv from occurrence files") + parser.add_argument( + "--video_events", + type=str, + required=True, + help="Path to video_events.csv" + ) + parser.add_argument( + "--occurrences", + type=str, + required=True, + help="Path to occurrences directory" + ) + parser.add_argument( + "--output", + type=str, + default=None, + help="Output path (default: overwrites input)" + ) + + args = parser.parse_args() + + add_event_times( + args.video_events, + args.occurrences, + args.output + ) diff --git a/scripts/add_gps_data.py b/scripts/add_gps_data.py new file mode 100644 index 0000000000000000000000000000000000000000..35b090a34365c21e13d6a6e8e82d43e71db741e6 --- /dev/null +++ b/scripts/add_gps_data.py @@ -0,0 +1,252 @@ +import pandas as pd +import numpy as np +import os +import json + +def 
extract_gps_from_occurrence(occurrence_path): + """ + Extract GPS statistics from an occurrence file. + + Returns: + dict with keys: launch_lat, launch_lon, min_lat, max_lat, min_lon, max_lon, min_alt, max_alt + """ + try: + # Read occurrence file with low_memory=False to avoid dtype warnings + occ_df = pd.read_csv(occurrence_path, low_memory=False) + + if occ_df.empty: + return None + + # Get GPS columns + lat_col = occ_df['latitude'].dropna() + lon_col = occ_df['longitude'].dropna() + alt_col = occ_df['altitude'].dropna() + + if lat_col.empty or lon_col.empty: + return None + + # Launch point is the first GPS coordinate + launch_lat = float(lat_col.iloc[0]) + launch_lon = float(lon_col.iloc[0]) + + # Calculate min/max + stats = { + 'launch_lat': launch_lat, + 'launch_lon': launch_lon, + 'min_lat': float(lat_col.min()), + 'max_lat': float(lat_col.max()), + 'min_lon': float(lon_col.min()), + 'max_lon': float(lon_col.max()), + } + + # Add altitude if available + if not alt_col.empty: + stats['min_alt'] = float(alt_col.min()) + stats['max_alt'] = float(alt_col.max()) + else: + stats['min_alt'] = None + stats['max_alt'] = None + + return stats + + except Exception as e: + print(f"Error processing {occurrence_path}: {str(e)}") + return None + + +def add_gps_to_video_events(video_events_path, occurrences_path, output_path=None): + """ + Add GPS columns to video_events.csv from occurrence files. 
+ + Adds columns: + - decimalLatitude (launch point) + - decimalLongitude (launch point) + - minimumElevationInMeters + - maximumElevationInMeters + - footprintWKT (bounding box in WKT format) + """ + # Read video_events.csv + df = pd.read_csv(video_events_path) + + # Add new columns if they don't exist + new_columns = ['decimalLatitude', 'decimalLongitude', + 'minimumElevationInMeters', 'maximumElevationInMeters', + 'footprintWKT'] + + for col in new_columns: + if col not in df.columns: + df[col] = np.nan + + # Process each video + for idx, row in df.iterrows(): + event_id = row['eventID'] + parts = event_id.split(':') + + if len(parts) < 3: + continue + + date_session = parts[1] + video_id = parts[2] + + # Extract the date portion + date_parts = date_session.split('_session_') + date_part = date_parts[0] if len(date_parts) > 1 else date_session + + # Construct occurrence filename + # Try with underscore first (for flight_1, flight_2 format) + occurrence_file = f"{date_part}_{video_id}.csv" + occurrence_path = os.path.join(occurrences_path, occurrence_file) + + # If that doesn't exist, try with dash (for older format) + if not os.path.exists(occurrence_path): + occurrence_file = f"{date_part}-{video_id}.csv" + occurrence_path = os.path.join(occurrences_path, occurrence_file) + + if not os.path.exists(occurrence_path): + print(f"⚠ {video_id}: No occurrence file") + continue + + # Extract GPS data + gps_stats = extract_gps_from_occurrence(occurrence_path) + + if gps_stats is None: + print(f"⚠ {video_id}: No GPS data") + continue + + # Update video_events + df.at[idx, 'decimalLatitude'] = gps_stats['launch_lat'] + df.at[idx, 'decimalLongitude'] = gps_stats['launch_lon'] + + if gps_stats['min_alt'] is not None: + df.at[idx, 'minimumElevationInMeters'] = gps_stats['min_alt'] + df.at[idx, 'maximumElevationInMeters'] = gps_stats['max_alt'] + + # Create WKT footprint (bounding box) + wkt = f"POLYGON(({gps_stats['min_lon']} {gps_stats['min_lat']}, " \ + 
f"{gps_stats['max_lon']} {gps_stats['min_lat']}, " \ + f"{gps_stats['max_lon']} {gps_stats['max_lat']}, " \ + f"{gps_stats['min_lon']} {gps_stats['max_lat']}, " \ + f"{gps_stats['min_lon']} {gps_stats['min_lat']}))" + df.at[idx, 'footprintWKT'] = wkt + + print(f"✓ {video_id}: Launch ({gps_stats['launch_lat']:.6f}, {gps_stats['launch_lon']:.6f}), " + f"Bounds: lat[{gps_stats['min_lat']:.6f}, {gps_stats['max_lat']:.6f}], " + f"lon[{gps_stats['min_lon']:.6f}, {gps_stats['max_lon']:.6f}]") + + # Write updated CSV + if output_path is None: + output_path = video_events_path + + df.to_csv(output_path, index=False) + print(f"\nUpdated video_events.csv written to: {output_path}") + return df + + +def add_gps_to_session_events(session_events_path, video_events_df, output_path=None): + """ + Add GPS columns to session_events.csv by aggregating from video_events. + + For each session: + - launchLatitude/launchLongitude: Launch point of first video in session + - decimalLatitude: [min, max] latitude range as string + - decimalLongitude: [min, max] longitude range as string + - footprintWKT: Bounding box encompassing all videos in session + - minimumElevationInMeters/maximumElevationInMeters: Min/max across all videos + """ + # Read session_events.csv + session_df = pd.read_csv(session_events_path) + + # Add new columns if they don't exist + new_columns = ['launchLatitude', 'launchLongitude', + 'minimumElevationInMeters', 'maximumElevationInMeters', + 'footprintWKT'] + + for col in new_columns: + if col not in session_df.columns: + session_df[col] = np.nan + + # Process each session + for idx, row in session_df.iterrows(): + session_id = row['eventID'] + + # Get all videos for this session + session_videos = video_events_df[video_events_df['parentEventID'] == session_id] + + if session_videos.empty: + print(f"⚠ {session_id}: No videos found") + continue + + # Filter videos with GPS data + videos_with_gps = session_videos.dropna(subset=['decimalLatitude', 'decimalLongitude']) + + 
if videos_with_gps.empty: + print(f"⚠ {session_id}: No GPS data in videos") + continue + + # Launch point from first video + first_video = videos_with_gps.iloc[0] + session_df.at[idx, 'launchLatitude'] = first_video['decimalLatitude'] + session_df.at[idx, 'launchLongitude'] = first_video['decimalLongitude'] + + # Calculate session-level min/max across all videos + min_lat = videos_with_gps['decimalLatitude'].min() + max_lat = videos_with_gps['decimalLatitude'].max() + min_lon = videos_with_gps['decimalLongitude'].min() + max_lon = videos_with_gps['decimalLongitude'].max() + + # Set decimalLatitude and decimalLongitude to [min, max] ranges + session_df.at[idx, 'decimalLatitude'] = f"[{min_lat:.6f}, {max_lat:.6f}]" + session_df.at[idx, 'decimalLongitude'] = f"[{min_lon:.6f}, {max_lon:.6f}]" + + # Aggregate elevation + if 'minimumElevationInMeters' in videos_with_gps.columns: + elev_videos = videos_with_gps.dropna(subset=['minimumElevationInMeters']) + if not elev_videos.empty: + session_df.at[idx, 'minimumElevationInMeters'] = elev_videos['minimumElevationInMeters'].min() + session_df.at[idx, 'maximumElevationInMeters'] = elev_videos['maximumElevationInMeters'].max() + + # Create session footprint + wkt = f"POLYGON(({min_lon} {min_lat}, {max_lon} {min_lat}, " \ + f"{max_lon} {max_lat}, {min_lon} {max_lat}, {min_lon} {min_lat}))" + session_df.at[idx, 'footprintWKT'] = wkt + + print(f"✓ {session_id.split(':')[1]}: Launch ({first_video['decimalLatitude']:.6f}, {first_video['decimalLongitude']:.6f}), " + f"Session bounds: lat[{min_lat:.6f}, {max_lat:.6f}], lon[{min_lon:.6f}, {max_lon:.6f}]") + + # Write updated CSV + if output_path is None: + output_path = session_events_path + + session_df.to_csv(output_path, index=False) + print(f"\nUpdated session_events.csv written to: {output_path}") + + +def main(): + import argparse + + parser = argparse.ArgumentParser(description="Add GPS data to video_events and session_events") + parser.add_argument("--video_events", type=str, 
required=True, help="Path to video_events.csv") + parser.add_argument("--session_events", type=str, required=True, help="Path to session_events.csv") + parser.add_argument("--occurrences", type=str, required=True, help="Path to occurrences directory") + parser.add_argument("--output_video", type=str, default=None, help="Output path for video_events (default: overwrite)") + parser.add_argument("--output_session", type=str, default=None, help="Output path for session_events (default: overwrite)") + + args = parser.parse_args() + + print("=" * 80) + print("STEP 1: Adding GPS data to video_events.csv") + print("=" * 80) + video_df = add_gps_to_video_events(args.video_events, args.occurrences, args.output_video) + + print("\n" + "=" * 80) + print("STEP 2: Adding GPS data to session_events.csv") + print("=" * 80) + add_gps_to_session_events(args.session_events, video_df, args.output_session) + + print("\n" + "=" * 80) + print("DONE!") + print("=" * 80) + + +if __name__ == "__main__": + main() diff --git a/scripts/merge_behavior_telemetry.py b/scripts/merge_behavior_telemetry.py new file mode 100644 index 0000000000000000000000000000000000000000..71c577111348db8c5743daf45368c2ec7306deaa --- /dev/null +++ b/scripts/merge_behavior_telemetry.py @@ -0,0 +1,353 @@ +import re +import os +import json +import pysrt +import argparse +import pandas as pd +from tqdm import tqdm +from glob import glob +from datetime import datetime +import xml.etree.ElementTree as ET + +" Based on script authored by Otto Brookes for KABR-2023 project " + + +def pandify_xml_tracks(path2tracks): + elems = [] + et = ET.parse(path2tracks) + root = et.getroot() + for row in root: + for e in row.iter("box"): + for k, v in row.attrib.items(): + e.attrib[k] = v + elems.append(e.attrib) + track_df = pd.DataFrame(elems) + track_df["frame"] = track_df.frame.astype(int) + return track_df + + +def extract_frame_no(text): + pattern = r": (\d+)," + matches = re.findall(pattern, text) + numbers = [int(match) for 
match in matches] + assert len(numbers) == 1, "Frame index must be unique" + return next(iter(numbers)) + + +def extract_meta_data(text): + # Extract all [text] + pattern = r"\[(.*?)\]" + matches = re.findall(pattern, text) + data_dict = {} + for item in matches: + key_value = item.split(":", 1) + key = key_value[0].strip() + value = key_value[1].strip() + data_dict[key] = value + return data_dict + + +def pandify_srt_data(path2srt): + subs = pysrt.open(path2srt) + all_meta_data = [] + for s in subs: + split_text = s.text.split("\n") + meta_data = extract_meta_data(split_text[2]) + meta_data["frame"] = extract_frame_no(split_text[0]) + meta_data["date_time"] = split_text[1] + all_meta_data.append(meta_data) + srt_df = pd.DataFrame(all_meta_data) + srt_df["frame"] = srt_df["frame"] - 1 + return srt_df + + +def get_per_frame_annotations(path2xml): + et = ET.parse(path2xml) + root = et.getroot() + per_frame_annotations = [] + for row in root.findall("track"): + for e, j in zip(row.iter("points"), row.iter("attribute")): + behaviour = j.text + e.attrib["behaviour"] = behaviour + per_frame_annotations.append(e.attrib) + return per_frame_annotations + + +def add_per_frame_behaviours(merged_df, path2annotations): + mini_scenes_df = None + ms_annotations = glob(f"{path2annotations}/**/*.xml", recursive=True) + for ms in ms_annotations: + ms_index = ms.split("/")[-1].split(".")[0] + ms_df = merged_df[merged_df.id == str(ms_index)].sort_values(by="frame") + first_frame = ms_df.frame.iloc[0] # ugly - rework later + per_frame_anns = pd.DataFrame(get_per_frame_annotations(ms)) + per_frame_anns["frame"] = per_frame_anns.frame.astype(int) + first_frame + ms_df = ms_df.merge(per_frame_anns, on="frame") + if mini_scenes_df is None: + mini_scenes_df = ms_df + else: + mini_scenes_df = pd.concat([mini_scenes_df, ms_df]) + return mini_scenes_df + + +def find_srt_file(session_data_root, date_part, filename): + """ + Recursively search for SRT file matching the date and filename. 
+ + Args: + session_data_root: Root path to session_data directory + date_part: Date portion of the directory name (e.g., '11_01_23' or '17_01_2023_session_1') + filename: DJI filename (e.g., 'DJI_0488') + + Returns: + Path to SRT file if found, None otherwise + """ + # Try to find the date directory in session_data + date_dir = os.path.join(session_data_root, date_part) + if not os.path.exists(date_dir): + # Try without session suffix for cases like '16_01_23_session_1' -> '16_01_23' + base_date = date_part.split('_session_')[0] + date_dir = os.path.join(session_data_root, base_date) + + if not os.path.exists(date_dir): + print(f"Warning: Could not find date directory for {date_part}") + return None + + # Recursively search for the SRT file + srt_filename = f"{filename}.SRT" + for root, dirs, files in os.walk(date_dir): + if srt_filename in files: + return os.path.join(root, srt_filename) + + return None + + +def find_flight_log(flight_logs_path, srt_df): + """ + Find the matching flight log CSV based on datetime from SRT data. 
+ + Args: + flight_logs_path: Path to decrypted_flight_logs directory + srt_df: DataFrame with SRT data containing date_time column + + Returns: + Path to matching flight log CSV, or None if not found + """ + if srt_df.empty or 'date_time' not in srt_df.columns: + return None + + # Get first datetime from SRT (format: "2023-01-11 16:04:03,681,492") + first_datetime_str = srt_df['date_time'].iloc[0] + # Parse just the date and time part (ignore milliseconds) + srt_datetime = datetime.strptime(first_datetime_str.split(',')[0], "%Y-%m-%d %H:%M:%S") + + # Search for matching flight log + flight_logs = glob(f"{flight_logs_path}/*.csv") + + for log_path in flight_logs: + # Read full file to get complete time range (many files are small) + try: + log_df = pd.read_csv(log_path) + if 'datetime(utc)' not in log_df.columns or log_df.empty: + continue + + # Convert to datetime and add 3 hours (flight logs are 3 hours behind) + log_df['datetime_corrected'] = pd.to_datetime(log_df['datetime(utc)']) + pd.Timedelta(hours=3) + + # Check if SRT datetime falls within flight log timerange + log_start = log_df['datetime_corrected'].min() + log_end = log_df['datetime_corrected'].max() + + # Skip if dates are invalid + if pd.isna(log_start) or pd.isna(log_end): + continue + + if log_start <= srt_datetime <= log_end: + return log_path + except Exception as e: + continue + + return None + + +def merge_flight_log_data(merged_df, flight_log_path): + """ + Merge flight log data with the main dataframe based on datetime. 
+ + Args: + merged_df: Main dataframe with date_time column + flight_log_path: Path to flight log CSV + + Returns: + Merged dataframe with flight log data + """ + if flight_log_path is None or not os.path.exists(flight_log_path): + return merged_df + + try: + # Read flight log + flight_df = pd.read_csv(flight_log_path) + + # Prepare datetime columns for merging + # SRT format: "2023-01-11 16:04:03,681,492" -> convert to "2023-01-11 16:04:03" + merged_df['datetime_merge'] = merged_df['date_time'].apply( + lambda x: x.split(',')[0] if pd.notna(x) else None + ) + + # Flight log format: "2023-01-11 07:45:46" + # IMPORTANT: Flight log datetimes are 3 hours behind actual time - add 3 hours + flight_df['datetime_merge'] = pd.to_datetime(flight_df['datetime(utc)']) + pd.Timedelta(hours=3) + + # Merge on datetime + merged_df['datetime_merge'] = pd.to_datetime(merged_df['datetime_merge']) + + # Use merge_asof for nearest time matching + merged_df = merged_df.sort_values('datetime_merge') + flight_df = flight_df.sort_values('datetime_merge') + + # Merge with flight log data + result_df = pd.merge_asof( + merged_df, + flight_df, + on='datetime_merge', + direction='nearest', + tolerance=pd.Timedelta('2s'), # Increased tolerance to 2 seconds + suffixes=('', '_flight') + ) + + # Drop temporary merge column and handle duplicate columns + result_df = result_df.drop('datetime_merge', axis=1) + + # Remove duplicate latitude/longitude/altitude columns from flight log if they exist + # Keep the SRT versions (more accurate for video frames) + for col in ['latitude', 'longitude', 'altitude']: + if f'{col}_flight' in result_df.columns: + result_df = result_df.drop(f'{col}_flight', axis=1) + + print(f" Merged with flight log: {os.path.basename(flight_log_path)}") + return result_df + + except Exception as e: + print(f" Warning: Could not merge flight log: {str(e)}") + if 'datetime_merge' in merged_df.columns: + merged_df = merged_df.drop('datetime_merge', axis=1) + return merged_df + + 
+def main(): + parser = argparse.ArgumentParser() + parser.add_argument( + "--data_path", + type=str, + help="Please use the full path to the data dir in this repo!", + ) + parser.add_argument( + "--session_data_path", + type=str, + default="/fs/ess/PAS2136/Kenya-2023/Zebras/session_data", + help="Path to session_data directory containing SRT files", + ) + parser.add_argument( + "--flight_logs_path", + type=str, + default="/fs/ess/PAS2136/Kenya-2023/Zebras/Flight_Logs/decrypted_flight_logs", + help="Path to decrypted_flight_logs directory", + ) + parser.add_argument( + "--skip-airdata", + action="store_true", + help="Skip merging with airdata/flight log files", + ) + parser.add_argument("--write", type=bool, default=True) + parser.add_argument("--outpath", type=str, help="Path to write csvs to") + args = parser.parse_args() + + path2data = args.data_path + session_data_root = args.session_data_path + flight_logs_path = args.flight_logs_path + data_dirs = [x for x in os.listdir(path2data) if not x.startswith(".")] + path2write = args.outpath + + good = 0 + fail = 0 + failed_files = [] + + for d in tqdm(data_dirs): + try: + # Parse directory name to get date and filename + # Format: DATE-FILENAME (e.g., '11_01_23-DJI_0488' or '17_01_2023_session_1-DJI_0005') + parts = d.split("-") + date_part = parts[0] + filename = parts[-1] + + # Formulate paths + path2tracks = f"{path2data}/{d}/metadata/{filename}_tracks.xml" + path2annotations = f"{path2data}/{d}/actions/" + + # Find SRT file recursively + path2srt = find_srt_file(session_data_root, date_part, filename) + + if path2srt is None: + raise FileNotFoundError(f"Could not find SRT file for {date_part}-{filename}") + + print(f"Processing {d}: Found SRT at {path2srt}") + + # initialise dfs: + srt_df = pandify_srt_data(path2srt) + track_df = pandify_xml_tracks(path2tracks) + merged_df = srt_df.merge(track_df, on="frame", how="left") + + # Add date and video_id columns to ALL rows + merged_df.insert(0, "date", date_part) + 
merged_df.insert(1, "video_id", filename) + + # Move frame to position 2 + frame_col = merged_df.pop("frame") + merged_df.insert(2, "frame", frame_col) + + # Move id (mini-scene id) to position 3 + if "id" in merged_df.columns: + id_col = merged_df.pop("id") + merged_df.insert(3, "id", id_col) + + # Ensure date_time is preserved (move to position 4) + if "date_time" in merged_df.columns: + datetime_col = merged_df.pop("date_time") + merged_df.insert(4, "date_time", datetime_col) + + # Find and merge flight log data if path provided and not skipped + if flight_logs_path and not args.skip_airdata: + flight_log_path = find_flight_log(flight_logs_path, srt_df) + if flight_log_path: + merged_df = merge_flight_log_data(merged_df, flight_log_path) + + # Add per frame behaviours to existing df + mini_scene_df = add_per_frame_behaviours(merged_df, path2annotations) + + # Merge with frame df to preserve all frames (including those without annotations) + frame_df = merged_df[['date', 'video_id', 'frame', 'date_time']] + mini_scene_df = frame_df.merge( + mini_scene_df, on="frame", how="left" + ) + + # Remove duplicate date/video_id columns if they exist + for col in ['date_x', 'date_y', 'video_id_x', 'video_id_y', 'date_time_x', 'date_time_y']: + if col in mini_scene_df.columns: + # Keep the non-null version + base_col = col.rsplit('_', 1)[0] + if f'{base_col}_x' in mini_scene_df.columns and f'{base_col}_y' in mini_scene_df.columns: + mini_scene_df[base_col] = mini_scene_df[f'{base_col}_x'].fillna(mini_scene_df[f'{base_col}_y']) + mini_scene_df = mini_scene_df.drop([f'{base_col}_x', f'{base_col}_y'], axis=1) + + if args.write: + mini_scene_df.sort_values(by="frame").to_csv(path2write+f"{d}.csv", index=False) + good += 1 + except Exception as e: + failed_files.append(d) + print(f"Failed on {d}: {str(e)}") + fail += 1 + print("Pass: ", good, "Fail: ", fail) + print("Failed files:", failed_files) + +if __name__ == "__main__": + main() \ No newline at end of file diff --git 
a/scripts/update_video_events.py b/scripts/update_video_events.py new file mode 100644 index 0000000000000000000000000000000000000000..3fa7faec3ad4cc85dc04d30f05dd48ce3626da3a --- /dev/null +++ b/scripts/update_video_events.py @@ -0,0 +1,119 @@ +import pandas as pd +import json +import os +from pathlib import Path + +def update_video_events( + video_events_path, + data_path, + output_path=None +): + """ + Update video_events.csv with associatedMedia paths for detections and behavior annotations. + + Args: + video_events_path: Path to video_events.csv + data_path: Path to the data directory containing video directories + output_path: Path to write updated CSV (if None, overwrites input) + """ + # Read video_events.csv + df = pd.read_csv(video_events_path) + + # Parse the eventID to extract date and video_id + # Format: KABR-2023:DATE_SESSION:VIDEO_ID + for idx, row in df.iterrows(): + event_id = row['eventID'] + parts = event_id.split(':') + + if len(parts) < 3: + print(f"Warning: Could not parse eventID: {event_id}") + continue + + date_session = parts[1] + video_id = parts[2] + + # Extract the date portion (without session) + # e.g., "11_01_23_session_1" -> "11_01_23" + date_parts = date_session.split('_session_') + if len(date_parts) > 1: + date_part = date_parts[0] + else: + date_part = date_session + + # Construct the directory name + dir_name = f"{date_part}-{video_id}" + + # Build paths to detections and behavior files + detections_path = os.path.join(data_path, dir_name, "metadata", f"{video_id}_tracks.xml") + + # For behavior annotations, we need to find all XML files in the actions directory + actions_dir = os.path.join(data_path, dir_name, "actions") + behavior_files = [] + + if os.path.exists(actions_dir): + behavior_files = [f for f in os.listdir(actions_dir) if f.endswith('.xml')] + behavior_files.sort() # Sort for consistency + + # Check if files exist + detections_exists = os.path.exists(detections_path) + + # Create relative paths from the 
kabr-behavior-telemetry/data directory + detections_rel = f"../../../mini-scenes_zebras/kabr-datapalooza-2023/data/{dir_name}/metadata/{video_id}_tracks.xml" if detections_exists else "" + + # Create behavior annotations list with relative paths + behavior_rel_list = [] + if behavior_files: + for bf in behavior_files: + behavior_rel_list.append(f"../../../mini-scenes_zebras/kabr-datapalooza-2023/data/{dir_name}/actions/{bf}") + + # Update the associatedMedia field with JSON structure + associated_media = { + "detection": detections_rel, + "behavior": behavior_rel_list + } + + # Update the dataframe + df.at[idx, 'associatedMedia'] = json.dumps(associated_media) + + # Print status + status = "✓" if detections_exists else "✗" + behavior_count = len(behavior_files) if behavior_files else 0 + print(f"{status} {video_id}: detections={detections_exists}, behaviors={behavior_count}") + + # Write the updated CSV + if output_path is None: + output_path = video_events_path + + df.to_csv(output_path, index=False) + print(f"\nUpdated video_events.csv written to: {output_path}") + +if __name__ == "__main__": + import argparse + + parser = argparse.ArgumentParser(description="Update video_events.csv with associatedMedia paths") + parser.add_argument( + "--video_events", + type=str, + required=True, + help="Path to video_events.csv" + ) + parser.add_argument( + "--data_path", + type=str, + required=True, + help="Path to data directory containing video directories" + ) + parser.add_argument( + "--output", + type=str, + default=None, + help="Output path (default: overwrites input)" + ) + + args = parser.parse_args() + + update_video_events( + args.video_events, + args.data_path, + args.output + )