File size: 4,051 Bytes
67137fb
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
eb3313e
14cc141
 
 
 
 
 
 
aeff35c
 
67137fb
14cc141
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
d3caa18
 
 
67137fb
d3caa18
 
 
 
14cc141
 
67137fb
14cc141
 
 
 
 
 
d3caa18
14cc141
b38de6f
 
14cc141
 
 
 
 
 
d3caa18
14cc141
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
---
configs:
  - config_name: all
    data_files:
      - split: train
        path: "*-train.tar"
    default: true
language: ins
license: cc-by-sa-4.0
datasets:
- bridgeconn/sign-dictionary-isl
tags:
- video
- parallel-corpus
- low-resource-languages
---


# Dataset Card for Sign Dictionary Dataset

This dataset contains Indian Sign Language videos with one gloss per video. There are 3,077 separate lexical items (glosses) included.
The dataset is licensed under the Creative Commons Attribution-ShareAlike 4.0 International License (CC BY-SA 4.0).


## Dataset Details

There is a total of 2.5 hours of sign videos.

## Dataset Description

- Segmented sign videos
- Pose estimation data in the following formats
  - skeletal video
  - Frame-wise body landmarks detected by dwpose, as a numpy array
  - Frame-wise body landmarks detected by mediapipe, in .pose format

## How to use

```python
import webdataset as wds
import numpy as np
import json
import tempfile
import os
import cv2


def main():
    """Stream the dataset shards over HTTP and inspect one sample.

    Demonstrates every modality in a sample: the JSON metadata, the
    mp4 video, the dwpose landmark arrays, and the mediapipe .pose data.
    """
    # NOTE: the original published snippet mixed tabs and spaces, which
    # raises TabError in Python 3 — indentation is normalized here.
    buffer_size = 1024
    dataset = (
        wds.WebDataset(
            "https://huggingface.co/datasets/bridgeconn/sign-dictionary-isl/resolve/main/shard_{00001..00002}-train.tar",
            shardshuffle=False)
        .shuffle(buffer_size)
        .decode()
    )
    for sample in dataset:
        # Each sample contains the keys:
        #   'mp4', 'pose-dwpose.npz', 'pose-mediapipe.pose' and 'json'
        # print(sample.keys())

        # JSON metadata (decoded to a dict by .decode())
        json_data = sample['json']
        print(json_data['filename'])
        print(json_data['transcripts'])
        print(json_data['glosses'])

        # main video (raw mp4 bytes)
        mp4_data = sample['mp4']
        process_video(mp4_data)

        # dwpose results: per-frame landmark coordinates
        dwpose_coords = sample["pose-dwpose.npz"]

        frame_poses = dwpose_coords['frames'].tolist()
        print(f"Frames in dwpose coords: {len(frame_poses)} poses")
        print(f"Pose coords shape: {len(frame_poses[0][0])}")
        print(f"One point looks like [x,y]: {frame_poses[0][0][0]}")

        # mediapipe results in .pose format (raw bytes)
        pose_format_data = sample["pose-mediapipe.pose"]
        process_poseformat(pose_format_data)

        # only inspect the first sample
        break


def process_poseformat(pose_format_data):
    """Decode a .pose byte payload and report the landmark array shape.

    Args:
        pose_format_data: raw bytes of a pose-format (.pose) file holding
            mediapipe body landmarks.
    """
    from pose_format import Pose  # third-party; imported lazily as in the original
    try:
        # Pose.read accepts the raw byte buffer directly, so the original
        # temp-file round trip (which also leaked an open file handle)
        # is unnecessary.
        pose = Pose.read(pose_format_data)
        print(f"Mediapipe results from pose-format: {pose.body.data.shape}")
    except Exception as e:
        # best-effort demo code: report and continue
        print(f"Error processing pose-format: {e}")


def process_video(mp4_data):
    """Write mp4 bytes to a temp file, open with OpenCV, and show the first frame.

    Args:
        mp4_data: raw bytes of an mp4 video.
    """
    print(f"Video bytes length: {len(mp4_data)} bytes")

    temp_file = None
    cap = None
    try:
        # cv2.VideoCapture needs a filesystem path, so spill the bytes
        # to a temporary file first.
        with tempfile.NamedTemporaryFile(suffix=".mp4", delete=False) as tmp:
            tmp.write(mp4_data)
            temp_file = tmp.name

        cap = cv2.VideoCapture(temp_file)

        if not cap.isOpened():
            raise IOError(f"Could not open video file: {temp_file}")

        # Example: Get video metadata
        frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
        fps = cap.get(cv2.CAP_PROP_FPS)
        width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

        print(f"Video Info: {frame_count} frames, {fps:.2f} FPS, {width}x{height}")

        # Example: Read and display the first frame (or process as needed)
        ret, frame = cap.read()
        if ret:
            print(f"First frame shape: {frame.shape}, dtype: {frame.dtype}")
            # OpenCV returns BGR; convert for matplotlib display.
            frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            import matplotlib.pyplot as plt
            plt.imshow(frame_rgb)
            plt.title("Sample First Frame")
            plt.show()
        else:
            print("Could not read first frame.")

    except Exception as e:
        # best-effort demo code: report and continue
        print(f"Error processing external MP4: {e}")
    finally:
        # Release in finally so the capture handle is not leaked when an
        # exception is raised after VideoCapture opens (the original only
        # released on the success path).
        if cap is not None:
            cap.release()
        if temp_file and os.path.exists(temp_file):
            os.remove(temp_file)  # Clean up the temporary file


# Script entry point: stream the shards and inspect one sample.
if __name__ == '__main__':
	main()
```


## License

This dataset is licensed under cc-by-sa-4.0 (Creative Commons Attribution-ShareAlike 4.0 International).