from collections import defaultdict
import json
from pathlib import Path
from typing import List, Tuple

import numpy as np
from datasets import Dataset, DatasetDict, Video
from torch.utils.data import DataLoader


def create_splits(path: Path, split: Tuple[float, float, float]) -> Tuple[List[str], List[str], List[str]]:
    # Find every annotation JSON file under the data root
    json_files = list(path.glob("**/*.json"))
    print(f"Found {len(json_files)} json files")
    
    # Accumulate total annotated length (seconds) per participant; the
    # directory layout is assumed to be <root>/<participant>/<session>/.
    participants_length = defaultdict(float)
    for json_file in json_files:
        participant = json_file.parts[-3]
        with json_file.open("r") as f:
            data = json.load(f)
        # Session length = end time of the last annotated action
        participants_length[participant] += data[-1]["end_t"]

    # Assign whole participants to splits so that each split's total length
    # approximately matches the requested fractions; keeping participants
    # intact means no identity appears in more than one split.
    total_length = sum(participants_length.values())
    train_target = total_length * split[0]
    valid_target = total_length * split[1]
    
    # Sort participants by length for more balanced distribution
    sorted_participants = sorted(participants_length.items(), key=lambda x: x[1], reverse=True)
    
    # Assign participants to splits
    train_participants = []
    valid_participants = []
    test_participants = []
    
    train_length = 0
    valid_length = 0
    test_length = 0
    
    for participant, length in sorted_participants:
        # Add to the split with the largest deficit compared to target
        train_deficit = train_target - train_length if train_length < train_target else -float('inf')
        valid_deficit = valid_target - valid_length if valid_length < valid_target else -float('inf')
        
        # Choose split based on deficit
        if train_deficit >= valid_deficit and train_deficit > -float('inf'):
            train_participants.append(participant)
            train_length += length
        elif valid_deficit > -float('inf'):
            valid_participants.append(participant)
            valid_length += length
        else:
            test_participants.append(participant)
            test_length += length

    print(f"Effective splits: {train_length/total_length:.2f}, {valid_length/total_length:.2f}, {test_length/total_length:.2f}")

    return train_participants, valid_participants, test_participants
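
# Illustrative smoke test (an assumption, not part of the original pipeline):
# builds a tiny synthetic <root>/<participant>/<session>/*.json tree and runs
# create_splits on it. With lengths 50/30/20 and split (0.7, 0.1, 0.2), the
# greedy pass above always fills the largest remaining deficit, yielding
# (["P1", "P2"], ["P3"], []); the test split can end up empty for few,
# unevenly sized participants, hence the printed effective fractions.
def _demo_create_splits() -> None:
    import tempfile
    root = Path(tempfile.mkdtemp()) / "data"
    for participant, end_t in [("P1", 50.0), ("P2", 30.0), ("P3", 20.0)]:
        session_dir = root / participant / "session_01"
        session_dir.mkdir(parents=True)
        (session_dir / "annotations.json").write_text(
            json.dumps([{"start_t": 0.0, "end_t": end_t, "act_cat": "walk"}])
        )
    print(create_splits(root, (0.7, 0.1, 0.2)))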

def get_smpl_pose(smpl_path: Path, start_t: float, end_t: float, fps: int = 30):
    # Slice the SMPL-X arrays to the [start_t, end_t) action window,
    # converting seconds to frame indices at the given fps.
    smpl_pose = np.load(smpl_path)
    start_frame = int(start_t * fps)
    end_frame = int(end_t * fps)
    pose = {
        "poses": smpl_pose["poses"][start_frame:end_frame],
        "trans": smpl_pose["trans"][start_frame:end_frame],
        "betas": smpl_pose["betas"],
        "gender": smpl_pose["gender"],
    }

    return pose
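
# Usage sketch (hypothetical path; assumes the .npz stores per-frame arrays
# at 30 fps, matching the default above):
#
#   pose = get_smpl_pose(Path("P01/session_01/smplx.npz"), start_t=1.5, end_t=3.0)
#   pose["poses"].shape[0]  # int(3.0 * 30) - int(1.5 * 30) == 45 frames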


def create_dataset_dict(path: Path, split: Tuple[float, float, float] = (0.7, 0.1, 0.2)):
    # Floating-point sums such as 0.7 + 0.1 + 0.2 are not exactly 1.0, so
    # compare with a tolerance instead of strict equality.
    assert abs(sum(split) - 1) < 1e-6

    splits = create_splits(path, split)

    ds = {"train": defaultdict(list), "val": defaultdict(list), "test": defaultdict(list)}
    for split_name, participants in zip(["train", "val", "test"], splits):
        for participant in participants:
            # Gather every annotation file for this participant
            json_files = list(path.glob(f"**/{participant}/**/*.json"))
            for json_file in json_files:
                session = json_file.parts[-2]
                # Stored video paths are kept relative to the data root's
                # parent so the published dataset stays portable.
                data_folder_relative = json_file.parent.relative_to(path.parent)
                with json_file.open("r") as f:
                    data = json.load(f)
                for action in data:
                    # Subsample: keep each action with 5% probability
                    if np.random.rand() < 0.95:
                        continue
                    entry = {
                        "participant": participant,
                        "session": session,
                        "start_t": action["start_t"],
                        "end_t": action["end_t"],
                        "action": action["act_cat"],
                        "video_head": str(data_folder_relative / "Head_anonymized.mp4"),
                        "video_pelvis": str(data_folder_relative / "Pelvis_anonymized.mp4"),
                        "video_left_hand": str(data_folder_relative / "LeftHand_anonymized.mp4"),
                        "video_right_hand": str(data_folder_relative / "RightHand_anonymized.mp4"),
                        "video_left_knee": str(data_folder_relative / "LeftKnee_anonymized.mp4"),
                        "video_right_knee": str(data_folder_relative / "RightKnee_anonymized.mp4"),
                        # Load pose data from the actual on-disk location,
                        # not the relative string used for the video columns.
                        **get_smpl_pose(json_file.parent / "smplx.npz", action["start_t"], action["end_t"]),
                    }
                    for key, value in entry.items():
                        ds[split_name][key].append(value)

    return ds
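
# Shape of the result (illustrative values, not real data): each split maps
# column names to parallel lists, the columnar layout Dataset.from_dict
# expects below, e.g.
#   ds["train"]["participant"] == ["P01", "P01", "P07", ...]
#   ds["train"]["start_t"]     == [12.4, 87.0, 3.2, ...]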

def create_huggingface_dataset(ds):
    huggingface_ds = DatasetDict({
        "train": Dataset.from_dict(ds["train"]),
        "val": Dataset.from_dict(ds["val"]),
        "test": Dataset.from_dict(ds["test"])
    })
    print(f"Dataset sizes: Train: {len(huggingface_ds['train'])}, Val: {len(huggingface_ds['val'])}, Test: {len(huggingface_ds['test'])}")

    # Cast every video path column to the `datasets` Video feature so files
    # are decoded on access rather than stored as plain strings.
    for split in huggingface_ds:
        for col in huggingface_ds[split].column_names:
            if "video" in col:
                huggingface_ds[split] = huggingface_ds[split].cast_column(col, Video())

    return huggingface_ds
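
# Illustrative padding collate (an assumption; the key names and shapes are
# hypothetical): zero-pads variable-length "poses" sequences in a batch to
# the longest one so they can be stacked into a single tensor, as the note
# at the bottom of this script suggests.
def pad_collate(batch):
    import torch
    poses = [torch.as_tensor(ex["poses"]) for ex in batch]
    max_len = max(p.shape[0] for p in poses)
    padded = torch.zeros(len(poses), max_len, *poses[0].shape[1:])
    mask = torch.zeros(len(poses), max_len, dtype=torch.bool)
    for i, p in enumerate(poses):
        padded[i, : p.shape[0]] = p
        mask[i, : p.shape[0]] = True  # mark real (non-padded) frames
    return {"poses": padded, "mask": mask}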

if __name__ == "__main__":
    ds = create_dataset_dict(Path("path/to/data/of/uncompressed/folders/of/subjects"))

    huggingface_ds = create_huggingface_dataset(ds)

    dataset_sizes = {
        "train": len(huggingface_ds["train"]),
        "val": len(huggingface_ds["val"]),
        "test": len(huggingface_ds["test"])
    }

    # huggingface_ds can be wrapped with any framework's dataset utilities;
    # variable-length sequences (e.g. the pose arrays) may need padding for
    # efficient batching, as sketched below.
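
    # Sketch of that wrapping (pad_collate above is an illustrative helper;
    # iterating will decode the video columns, which requires the files to
    # exist at the stored relative paths):
    loader = DataLoader(huggingface_ds["train"], batch_size=4, collate_fn=pad_collate)

    # Optional: publish to the Hub (hypothetical repo id; requires prior
    # `huggingface-cli login`):
    # huggingface_ds.push_to_hub("your-username/your-dataset")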