Datasets:
Update README to reflect the easier loading of the .npz file
Browse files
README.md
CHANGED
|
@@ -28,7 +28,14 @@ import cv2
|
|
| 28 |
|
| 29 |
|
| 30 |
def main():
|
| 31 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 32 |
for sample in dataset:
|
| 33 |
''' Each sample contains:
|
| 34 |
'mp4', 'pose-animation.mp4',
|
|
@@ -38,7 +45,7 @@ def main():
|
|
| 38 |
# print(sample.keys())
|
| 39 |
|
| 40 |
# JSON metadata
|
| 41 |
-
json_str = sample['json']
|
| 42 |
json_data = json.loads(json_str)
|
| 43 |
print(json_data['filename'])
|
| 44 |
print(json_data['transcripts'])
|
|
@@ -53,8 +60,7 @@ def main():
|
|
| 53 |
process_video(pose_data)
|
| 54 |
|
| 55 |
# dwpose results
|
| 56 |
-
|
| 57 |
-
dwpose_coords = = np.load(BytesIO(npz_bytes))
|
| 58 |
|
| 59 |
frame_poses = dwpose_coords['frames'].tolist()
|
| 60 |
print(f"Frames in dwpose coords: {len(frame_poses)} poses")
|
|
|
|
| 28 |
|
| 29 |
|
| 30 |
def main():
|
| 31 |
+
buffer_size = 1024
|
| 32 |
+
dataset = (
|
| 33 |
+
wds.WebDataset(
|
| 34 |
+
"https://huggingface.co/datasets/bridgeconn/sign-dictionary-isl/resolve/main/chunk_{00001..00002}.tar",
|
| 35 |
+
shardshuffle=False)
|
| 36 |
+
.shuffle(buffer_size)
|
| 37 |
+
.decode()
|
| 38 |
+
)
|
| 39 |
for sample in dataset:
|
| 40 |
''' Each sample contains:
|
| 41 |
'mp4', 'pose-animation.mp4',
|
|
|
|
| 45 |
# print(sample.keys())
|
| 46 |
|
| 47 |
# JSON metadata
|
| 48 |
+
json_str = sample['json']
|
| 49 |
json_data = json.loads(json_str)
|
| 50 |
print(json_data['filename'])
|
| 51 |
print(json_data['transcripts'])
|
|
|
|
| 60 |
process_video(pose_data)
|
| 61 |
|
| 62 |
# dwpose results
|
| 63 |
+
dwpose_coords = sample["pose-dwpose.npz"]
|
|
|
|
| 64 |
|
| 65 |
frame_poses = dwpose_coords['frames'].tolist()
|
| 66 |
print(f"Frames in dwpose coords: {len(frame_poses)} poses")
|