---
dataset_info:
  features:
  - name: id
    dtype: int64
  - name: language
    dtype: string
  - name: verdict
    dtype: string
  - name: problem_id
    dtype: string
  splits:
  - name: train
    num_bytes: 705630705
    num_examples: 13065846
  download_size: 101643454
  dataset_size: 705630705
configs:
- config_name: default
  data_files:
  - split: train
    path: data/train-*
task_categories:
- text-generation
tags:
- code-generation
- benchmark
- c++
- python
---
Dataset for the paper [Can LLMs Generate High-Quality Test Cases for Algorithm Problems? TestCase-Eval: A Systematic Evaluation of Fault Coverage and Exposure](https://huggingface.co/papers/2506.12278).

GitHub: https://github.com/FlowRays/TestCase-Eval
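
A minimal loading sketch with the `datasets` library, based on the schema declared in the card metadata above. The Hub repository id below is an assumption (it mirrors the GitHub repo name); replace it with this dataset's actual id:

```python
from datasets import load_dataset

# Assumed Hub repository id, mirroring the GitHub repo name.
ds = load_dataset("FlowRays/TestCase-Eval", split="train")

# Fields declared in the card metadata:
# id (int64), language (string), verdict (string), problem_id (string)
print(ds.features)
print(ds[0])
```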