---
task_categories:
- translation
language:
- en
---

# Structure

Each sample will have a structure as follows:

```
{
    "id": ,
    "type": ,
    "view": ,
    "text": ,
    "video": ,
}
```

# How To Use

Because the returned video will be in bytes, here is a way to extract the frames and FPS:

```python
# pip install av
import io

import av
import numpy as np
from datasets import load_dataset


def extract_frames(video_bytes):
    """Decode an in-memory video into a list of RGB frame arrays plus its FPS.

    Args:
        video_bytes: Raw encoded video bytes (e.g. the ``video`` field of a sample).

    Returns:
        A tuple ``(frames, fps)`` where ``frames`` is a list of
        ``numpy.ndarray`` images of shape (H, W, 3) and ``fps`` is the
        stream's average frame rate as a float.

    Raises:
        ValueError: If the bytes contain no video stream.
    """
    # Open the encoded video directly from memory — no temp file needed.
    container = av.open(io.BytesIO(video_bytes))

    # Find the first video stream, if any.
    visual_stream = next(iter(container.streams.video), None)
    if visual_stream is None:
        raise ValueError("No video stream found in the provided bytes.")

    # average_rate is a fractions.Fraction; cast for a readable value.
    video_fps = float(visual_stream.average_rate)

    # Decode every frame into an RGB ndarray.
    frames_array = []
    for packet in container.demux([visual_stream]):
        for frame in packet.decode():
            frames_array.append(np.array(frame.to_image()))

    return frames_array, video_fps


dataset = load_dataset("VieSignLang/how2sign-clips", split="test", streaming=True)
sample = next(iter(dataset))["video"]
frames, video_fps = extract_frames(sample)

# frames is a plain list, so use len() rather than .shape
print(f"Number of frames: {len(frames)}")
print(f"Video FPS: {video_fps}")
```