Datasets:
Tasks:
Video Classification
Formats:
csv
Languages:
English
Size:
10K - 100K
ArXiv:
Tags:
medical
License:
Update README.md
Browse files
README.md
CHANGED
|
@@ -282,15 +282,15 @@ OmniFall addresses three critical limitations in current fall detection research
|
|
| 282 |
|
| 283 |
This benchmark includes annotations for the following datasets:
|
| 284 |
|
| 285 |
-
1. **CMDFall** (7h
|
| 286 |
-
2. **UP Fall** (4h 35m) -
|
| 287 |
-
3. **Le2i** (47m) -
|
| 288 |
-
4. **GMDCSA24** (21m) -
|
| 289 |
-
5. **CAUCAFall** (16m)
|
| 290 |
-
6. **
|
| 291 |
-
7. **
|
| 292 |
-
8. **MCFD** (12m) -
|
| 293 |
-
9. **OOPS-Fall** - Curated subset of genuine fall accidents from the OOPS dataset
|
| 294 |
|
| 295 |
## Structure
|
| 296 |
|
|
@@ -421,14 +421,89 @@ for split_name, split_data in [("CV", cv_split), ("CS", cs_split)]:
|
|
| 421 |
|
| 422 |
## Citation
|
| 423 |
|
| 424 |
-
If you use OmniFall in your research, please cite our paper (will be updated soon):
|
| 425 |
|
| 426 |
```bibtex
|
| 427 |
-
@inproceedings{
|
| 428 |
title={OmniFall: A Unified Staged-to-Wild Benchmark for Human Fall Detection},
|
| 429 |
author={},
|
| 430 |
booktitle={},
|
| 431 |
year={2025},
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 432 |
}
|
| 433 |
```
|
| 434 |
|
|
|
|
| 282 |
|
| 283 |
This benchmark includes annotations for the following datasets:
|
| 284 |
|
| 285 |
+
1. **[CMDFall](https://www.mica.edu.vn/perso/Tran-Thi-Thanh-Hai/CMDFALL.html)** (7h 25m single view) - 50 subjects, 7 synchronized views
|
| 286 |
+
2. **[UP Fall](https://sites.google.com/up.edu.mx/har-up/)** (4h 35m) - 17 subjects, 2 synchronized views
|
| 287 |
+
3. **[Le2i](https://search-data.ubfc.fr/imvia/FR-13002091000019-2024-04-09_Fall-Detection-Dataset.html)** (47m) - 9 subjects, 6 different rooms
|
| 288 |
+
4. **[GMDCSA24](https://github.com/ekramalam/GMDCSA24-A-Dataset-for-Human-Fall-Detection-in-Videos)** (21m) - 4 subjects, 3 rooms
|
| 289 |
+
5. **[CAUCAFall](https://data.mendeley.com/datasets/7w7fccy7ky/4)** (16m) - 10 subjects, 1 room
|
| 290 |
+
6. **[EDF](https://doi.org/10.5281/zenodo.15494102)** (13m) - 5 subjects, 2 synchronized views
|
| 291 |
+
7. **[OCCU](https://doi.org/10.5281/zenodo.15494102)** (14m) - 5 subjects, 2 non-synchronized views
|
| 292 |
+
8. **[MCFD](https://www.iro.umontreal.ca/~labimage/Dataset/)** (12m) - 1 subject, 8 views
|
| 293 |
+
9. **[OOPS-Fall](https://oops.cs.columbia.edu/data/)** - Curated subset of genuine fall accidents from the OOPS dataset, with strong variation in subjects and views.
|
| 294 |
|
| 295 |
## Structure
|
| 296 |
|
|
|
|
| 421 |
|
| 422 |
## Citation
|
| 423 |
|
| 424 |
+
If you use OmniFall in your research, please cite our paper (will be updated soon) as well as all sub-dataset papers:
|
| 425 |
|
| 426 |
```bibtex
|
| 427 |
+
@inproceedings{omnifall,
|
| 428 |
title={OmniFall: A Unified Staged-to-Wild Benchmark for Human Fall Detection},
|
| 429 |
author={},
|
| 430 |
booktitle={},
|
| 431 |
year={2025},
|
| 432 |
+
}
|
| 433 |
+
|
| 434 |
+
@inproceedings{omnifall_cmdfall,
|
| 435 |
+
title={A multi-modal multi-view dataset for human fall analysis and preliminary investigation on modality},
|
| 436 |
+
author={Tran, Thanh-Hai and Le, Thi-Lan and Pham, Dinh-Tan and Hoang, Van-Nam and Khong, Van-Minh and Tran, Quoc-Toan and Nguyen, Thai-Son and Pham, Cuong},
|
| 437 |
+
booktitle={2018 24th International Conference on Pattern Recognition (ICPR)},
|
| 438 |
+
pages={1947--1952},
|
| 439 |
+
year={2018},
|
| 440 |
+
organization={IEEE}
|
| 441 |
+
}
|
| 442 |
+
|
| 443 |
+
@article{omnifall_up-fall,
|
| 444 |
+
title={UP-fall detection dataset: A multimodal approach},
|
| 445 |
+
author={Mart{\'\i}nez-Villase{\~n}or, Lourdes and Ponce, Hiram and Brieva, Jorge and Moya-Albor, Ernesto and N{\'u}{\~n}ez-Mart{\'\i}nez, Jos{\'e} and Pe{\~n}afort-Asturiano, Carlos},
|
| 446 |
+
journal={Sensors},
|
| 447 |
+
volume={19},
|
| 448 |
+
number={9},
|
| 449 |
+
pages={1988},
|
| 450 |
+
year={2019},
|
| 451 |
+
publisher={MDPI}
|
| 452 |
+
}
|
| 453 |
+
|
| 454 |
+
@article{omnifall_le2i,
|
| 455 |
+
title={Optimized spatio-temporal descriptors for real-time fall detection: comparison of support vector machine and Adaboost-based classification},
|
| 456 |
+
author={Charfi, Imen and Miteran, Johel and Dubois, Julien and Atri, Mohamed and Tourki, Rached},
|
| 457 |
+
journal={Journal of Electronic Imaging},
|
| 458 |
+
volume={22},
|
| 459 |
+
number={4},
|
| 460 |
+
pages={041106--041106},
|
| 461 |
+
year={2013},
|
| 462 |
+
publisher={Society of Photo-Optical Instrumentation Engineers}
|
| 463 |
+
}
|
| 464 |
+
|
| 465 |
+
@article{omnifall_gmdcsa,
|
| 466 |
+
title={GMDCSA-24: A dataset for human fall detection in videos},
|
| 467 |
+
author={Alam, Ekram and Sufian, Abu and Dutta, Paramartha and Leo, Marco and Hameed, Ibrahim A},
|
| 468 |
+
journal={Data in Brief},
|
| 469 |
+
volume={57},
|
| 470 |
+
pages={110892},
|
| 471 |
+
year={2024},
|
| 472 |
+
publisher={Elsevier}
|
| 473 |
+
}
|
| 474 |
+
|
| 475 |
+
@article{omnifall_cauca,
|
| 476 |
+
title={Dataset CAUCAFall},
|
| 477 |
+
author={Eraso, Jose Camilo and Mu{\~n}oz, Elena and Mu{\~n}oz, Mariela and Pinto, Jesus},
|
| 478 |
+
journal={Mendeley Data},
|
| 479 |
+
volume={4},
|
| 480 |
+
year={2022}
|
| 481 |
+
}
|
| 482 |
+
|
| 483 |
+
@inproceedings{omnifall_edf_occu,
|
| 484 |
+
title={Evaluating depth-based computer vision methods for fall detection under occlusions},
|
| 485 |
+
author={Zhang, Zhong and Conly, Christopher and Athitsos, Vassilis},
|
| 486 |
+
booktitle={International symposium on visual computing},
|
| 487 |
+
pages={196--207},
|
| 488 |
+
year={2014},
|
| 489 |
+
organization={Springer}
|
| 490 |
+
}
|
| 491 |
+
|
| 492 |
+
@article{omnifall_mcfd,
|
| 493 |
+
title={Multiple cameras fall dataset},
|
| 494 |
+
author={Auvinet, Edouard and Rougier, Caroline and Meunier, Jean and St-Arnaud, Alain and Rousseau, Jacqueline},
|
| 495 |
+
journal={DIRO-Universit{\'e} de Montr{\'e}al, Tech. Rep},
|
| 496 |
+
volume={1350},
|
| 497 |
+
pages={24},
|
| 498 |
+
year={2010}
|
| 499 |
+
}
|
| 500 |
+
|
| 501 |
+
@inproceedings{omnifall_oops,
|
| 502 |
+
title={Oops! predicting unintentional action in video},
|
| 503 |
+
author={Epstein, Dave and Chen, Boyuan and Vondrick, Carl},
|
| 504 |
+
booktitle={Proceedings of the IEEE/CVF conference on computer vision and pattern recognition},
|
| 505 |
+
pages={919--929},
|
| 506 |
+
year={2020}
|
| 507 |
}
|
| 508 |
```
|
| 509 |
|