tanthinhdt commited on
Commit
81885b2
·
1 Parent(s): 7851027

docs: add README

Browse files
Files changed (1) hide show
  1. README.md +58 -0
README.md ADDED
@@ -0,0 +1,58 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ task_categories:
3
+ - translation
4
+ language:
5
+ - en
6
+ ---
7
+ # Structure
8
+ Each sample will have a structure as follows:
9
+ ```
10
+ {
11
+ "id": <id-of-sample>,
12
+ "type": <rgb-or-keypoints-data>,
13
+ "view": <frontal-or-side-view>,
14
+ "text": <translation-of-sample-in-spoken-language>,
15
+ "video": <video-in-bytes>,
16
+ }
17
+ ```
18
+
19
+ # How To Use
20
+ Because the video field is returned as raw bytes, here is a way to extract the frames and FPS:
21
+ ```
22
+ # pip install av
23
+
24
+ import av
25
+ import io
26
+ import numpy as np
27
+ import os
28
+ from datasets import load_dataset
29
+
30
+
31
+ def extract_frames(video_bytes):
32
+ # Open the video container from an in-memory bytes buffer
33
+ container = av.open(io.BytesIO(video_bytes))
34
+
35
+ # Find the video stream
36
+ visual_stream = next(iter(container.streams.video), None)
37
+
38
+ # Extract video properties
39
+ video_fps = visual_stream.average_rate
40
+
41
+ # Initialize a list to store the decoded frames
42
+ frames_array = []
43
+
44
+ # Extract frames
45
+ for packet in container.demux([visual_stream]):
46
+ for frame in packet.decode():
47
+ img_array = np.array(frame.to_image())
48
+ frames_array.append(img_array)
49
+
50
+ return frames_array, video_fps
51
+
52
+
53
+ dataset = load_dataset("VieSignLang/phoenix14-t", split="test", streaming=True)
54
+ sample = next(iter(dataset))["video"]
55
+ frames, video_fps = extract_frames(sample)
56
+ print(f"Number of frames: {len(frames)}")
57
+ print(f"Video FPS: {video_fps}")
58
+ ```