Datasets:
Languages:
English
ArXiv:
Tags:
egocentric-vision
exocentric-vision
gaze-tracking
referential-expressions
cooking
spatial-reasoning
License:
| """ | |
| Look and Tell Referential Dataset Loader | |
| Example script for loading and using the dataset. | |
| """ | |
| from pathlib import Path | |
| import pandas as pd | |
| import json | |
| import cv2 | |
| from typing import Dict, Any, Optional | |
class ARIAReferentialDataset:
    """
    Loader for KTH-ARIA Referential Dataset

    Expects a layout of::

        <data_path>/manifests/metadata.parquet (or metadata.csv)
        <data_path>/manifests/recipes.json
        <data_path>/<participant_id>/raw/<recording_id>/...
        <data_path>/<participant_id>/annotations/v1/<recording_id>/...

    Example usage:
        dataset = ARIAReferentialDataset('data')
        recording = dataset.load_recording('par_01', 'rec_01')
    """

    def __init__(self, data_path: str):
        """Initialize the loader and eagerly read the manifests.

        Args:
            data_path: Root directory of the dataset.
        """
        self.data_path = Path(data_path)
        self.metadata = self._load_metadata()
        self.recipes = self._load_recipes()

    def _load_metadata(self) -> pd.DataFrame:
        """Load dataset metadata, preferring Parquet with a CSV fallback.

        Raises:
            FileNotFoundError: If neither the parquet nor the CSV manifest exists
                (propagated from ``pd.read_csv``).
        """
        metadata_path = self.data_path / 'manifests' / 'metadata.parquet'
        if metadata_path.exists():
            return pd.read_parquet(metadata_path)
        # Fallback to CSV
        csv_path = self.data_path / 'manifests' / 'metadata.csv'
        return pd.read_csv(csv_path)

    def _load_recipes(self) -> Dict[str, Any]:
        """Load recipe information from ``manifests/recipes.json``."""
        recipe_path = self.data_path / 'manifests' / 'recipes.json'
        # Explicit encoding: JSON is UTF-8 by spec; avoid locale-dependent decode.
        with open(recipe_path, encoding='utf-8') as f:
            return json.load(f)

    def get_recording_info(self, participant_id: str, recording_id: str) -> Dict[str, Any]:
        """Get metadata for a specific recording.

        Args:
            participant_id: Participant identifier (e.g. 'par_01').
            recording_id: Recording identifier (e.g. 'rec_01').

        Returns:
            The matching metadata row as a plain dict (first match if duplicated).

        Raises:
            ValueError: If the recording is not present in the metadata table.
        """
        recording_uid = f"{participant_id}_{recording_id}"
        row = self.metadata[self.metadata['recording_uid'] == recording_uid]
        if len(row) == 0:
            raise ValueError(f"Recording {recording_uid} not found")
        return row.iloc[0].to_dict()

    def load_recording(self, participant_id: str, recording_id: str) -> Dict[str, Any]:
        """
        Load all data for a recording

        Returns:
            Dictionary containing:
            - ego_video_path: Path to egocentric video
            - exo_video_path: Path to exocentric video
            - audio_path: Path to audio
            - gaze: Gaze tracking dataframe (only if the file exists)
            - references: Reference annotations dataframe (only if the file exists)
            - transcription: ASR transcription dataframe (only if the file exists)
            - recipe: Recipe information (None if the recipe id is unknown)
            - metadata: Recording metadata

        Raises:
            ValueError: If the recording is not present in the metadata table.
        """
        # Get paths
        raw_path = self.data_path / participant_id / 'raw' / recording_id
        ann_path = self.data_path / participant_id / 'annotations' / 'v1' / recording_id
        # Get metadata
        info = self.get_recording_info(participant_id, recording_id)
        # Derive recipe_id from recording_num (recording_num 1 -> recipe_01, etc.)
        recipe_id = f"recipe_{int(info['recording_num']):02d}"
        # Load data
        result = {
            'ego_video_path': str(raw_path / 'ego_video.mp4'),
            'exo_video_path': str(raw_path / 'exo_video.mp4'),
            'audio_path': str(raw_path / 'audio.wav'),
            'metadata': info,
            'recipe': self.recipes.get(recipe_id),
        }
        # Load gaze data if available
        gaze_path = raw_path / 'ego_gaze.csv'
        if gaze_path.exists():
            result['gaze'] = pd.read_csv(gaze_path)
        # Load annotations if available
        ref_path = ann_path / 'references.csv'
        if ref_path.exists():
            result['references'] = pd.read_csv(ref_path)
        trans_path = ann_path / 'whisperx_transcription.tsv'
        if trans_path.exists():
            result['transcription'] = pd.read_csv(trans_path, sep='\t')
        return result

    def get_recordings_by_recipe(self, recipe_id: str) -> pd.DataFrame:
        """Get all recordings for a specific recipe

        Args:
            recipe_id: Recipe ID string (e.g., 'recipe_01', 'recipe_02', etc.)

        Returns:
            The metadata rows whose recording_num matches the recipe number.

        Raises:
            ValueError: If recipe_id is not of the form 'recipe_NN'.
        """
        # Extract recipe number from recipe_id (e.g., 'recipe_01' -> 1).
        # Validate explicitly so a malformed id yields a clear error instead of
        # a bare IndexError/ValueError from split()/int().
        parts = recipe_id.split('_')
        if len(parts) != 2 or not parts[1].isdigit():
            raise ValueError(f"Malformed recipe_id: {recipe_id!r} (expected 'recipe_NN')")
        recipe_num = int(parts[1])
        return self.metadata[self.metadata['recording_num'] == recipe_num]

    def get_participant_recordings(self, participant_id: str) -> pd.DataFrame:
        """Get all recordings for a specific participant"""
        return self.metadata[self.metadata['participant_id'] == participant_id]
def _main() -> None:
    """Demo entry point: print a dataset summary and inspect one recording."""
    dataset = ARIAReferentialDataset('data')

    # Dataset-level summary
    print(f"Total recordings: {len(dataset.metadata)}")
    print(f"Participants: {dataset.metadata['participant_id'].nunique()}")
    print(f"Recipes: {len(dataset.recipes) - 1}")  # -1 for surface_map
    print()

    # Inspect a single recording
    print("Loading par_01, rec_01...")
    recording = dataset.load_recording('par_01', 'rec_01')
    print(f"Recipe: {recording['recipe']['name']}")
    print(f"Duration: {recording['metadata']['duration_sec']:.1f}s")
    print(f"Has gaze: {recording['metadata']['has_gaze']}")
    print(f"References: {recording['metadata']['n_references']}")
    print()

    # Cross-participant view of one recipe
    recipe_1 = dataset.get_recordings_by_recipe('recipe_01')
    print(f"Recipe 1 performed by {len(recipe_1)} participants")


# Example usage
if __name__ == "__main__":
    _main()